HBASE-1960 Master should wait for DFS to come up when creating hbase.version; use alternate strategy for waiting for DNs

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1079501 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Andrew Kyle Purtell 2011-03-08 19:39:54 +00:00
parent d4cbacf40c
commit 3d4a190562
3 changed files with 51 additions and 25 deletions

View File

@ -60,6 +60,8 @@ Release 0.91.0 - Unreleased
   HBASE-3605 Fix balancer log message
   HBASE-3603 Remove -XX:+HeapDumpOnOutOfMemoryError autodump of heap option
              on OOME
+  HBASE-1960 Master should wait for DFS to come up when creating
+             hbase.version; use alternate strategy for waiting for DNs

 IMPROVEMENTS
   HBASE-3290 Max Compaction Size (Nicolas Spiegelberg via Stack)

View File

@ -231,7 +231,15 @@ public class MasterFileSystem {
     // Filesystem is good. Go ahead and check for hbase.rootdir.
     if (!fs.exists(rd)) {
       fs.mkdirs(rd);
-      FSUtils.setVersion(fs, rd);
+      // DFS leaves safe mode with 0 DNs when there are 0 blocks.
+      // We used to handle this by checking the current DN count and waiting until
+      // it is nonzero. With security, the check for datanode count doesn't work --
+      // it is a privileged op. So instead we adopt the strategy of the jobtracker
+      // and simply retry file creation during bootstrap indefinitely. As soon as
+      // there is one datanode it will succeed. Permission problems should have
+      // already been caught by mkdirs above.
+      FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
+        10 * 1000));
     } else {
       FSUtils.checkVersion(fs, rd, true);
     }
} }

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.util.StringUtils;

 import java.io.DataInputStream;
 import java.io.EOFException;
@ -207,7 +208,20 @@ public class FSUtils {
    */
   public static void setVersion(FileSystem fs, Path rootdir)
   throws IOException {
-    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION);
+    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0);
+  }
+
+  /**
+   * Sets version of file system
+   *
+   * @param fs filesystem object
+   * @param rootdir hbase root
+   * @param wait time to wait for retry
+   * @throws IOException e
+   */
+  public static void setVersion(FileSystem fs, Path rootdir, int wait)
+  throws IOException {
+    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait);
   }
/** /**
@ -216,15 +230,33 @@ public class FSUtils {
    * @param fs filesystem object
    * @param rootdir hbase root directory
    * @param version version to set
+   * @param wait time to wait for retry
    * @throws IOException e
    */
-  public static void setVersion(FileSystem fs, Path rootdir, String version)
-  throws IOException {
-    FSDataOutputStream s =
-      fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME));
-    s.writeUTF(version);
-    s.close();
-    LOG.debug("Created version file at " + rootdir.toString() + " set its version at:" + version);
+  public static void setVersion(FileSystem fs, Path rootdir, String version,
+      int wait) throws IOException {
+    while (true) try {
+      FSDataOutputStream s =
+        fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME));
+      s.writeUTF(version);
+      s.close();
+      LOG.debug("Created version file at " + rootdir.toString() +
+        " set its version at:" + version);
+      return;
+    } catch (IOException e) {
+      if (wait > 0) {
+        LOG.warn("Unable to create version file at " + rootdir.toString() +
+          ", retrying: " + StringUtils.stringifyException(e));
+        try {
+          Thread.sleep(wait);
+        } catch (InterruptedException ex) {
+          // ignore
+        }
+      } else {
+        // rethrow
+        throw e;
+      }
+    }
   }
/** /**
@ -262,22 +294,6 @@ public class FSUtils {
     FileSystem fs = FileSystem.get(conf);
     if (!(fs instanceof DistributedFileSystem)) return;
     DistributedFileSystem dfs = (DistributedFileSystem)fs;
-    // Are there any data nodes up yet?
-    // Currently the safe mode check falls through if the namenode is up but no
-    // datanodes have reported in yet.
-    try {
-      while (dfs.getDataNodeStats().length == 0) {
-        LOG.info("Waiting for dfs to come up...");
-        try {
-          Thread.sleep(wait);
-        } catch (InterruptedException e) {
-          //continue
-        }
-      }
-    } catch (IOException e) {
-      // getDataNodeStats can fail if superuser privilege is required to run
-      // the datanode report, just ignore it
-    }
     // Make sure dfs is not in safe mode
     while (dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET)) {
       LOG.info("Waiting for dfs to exit safe mode...");