HBASE-16985 TestClusterId failed due to wrong hbase rootdir

Signed-off-by: Michael Stack <stack@apache.org>
Guanghao Zhang, 2016-11-02 10:13:37 +08:00, committed by Michael Stack
parent de3a51263d
commit 1462cf77ef
2 changed files with 29 additions and 12 deletions

HMaster.java

@@ -406,7 +406,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.rsFatals = new MemoryBoundedLogMessageBuffer(
       conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
-    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
+    LOG.info("hbase.rootdir=" + getRootDir() +
         ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));
     // Disable usage of meta replicas in the master

HRegionServer.java

@@ -583,16 +583,7 @@ public class HRegionServer extends HasThread implements
       }
     };
-    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
-    // underlying hadoop hdfs accessors will be going against wrong filesystem
-    // (unless all is set to defaults).
-    FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
-    // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
-    // checksum verification enabled, then automatically switch off hdfs checksum verification.
-    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
-    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
-    this.rootDir = FSUtils.getRootDir(this.conf);
-    this.tableDescriptors = getFsTableDescriptors();
+    initializeFileSystem();
     service = new ExecutorService(getServerName().toShortString());
     spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
@@ -648,6 +639,19 @@ public class HRegionServer extends HasThread implements
     choreService.scheduleChore(compactedFileDischarger);
   }
+  private void initializeFileSystem() throws IOException {
+    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
+    // underlying hadoop hdfs accessors will be going against wrong filesystem
+    // (unless all is set to defaults).
+    FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
+    // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
+    // checksum verification enabled, then automatically switch off hdfs checksum verification.
+    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
+    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
+    this.rootDir = FSUtils.getRootDir(this.conf);
+    this.tableDescriptors = getFsTableDescriptors();
+  }
   protected TableDescriptors getFsTableDescriptors() throws IOException {
     return new FSTableDescriptors(this.conf,
       this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
@@ -1386,6 +1390,7 @@ public class HRegionServer extends HasThread implements
   protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
   throws IOException {
     try {
+      boolean updateRootDir = false;
       for (NameStringPair e : c.getMapEntriesList()) {
         String key = e.getName();
         // The hostname the master sees us as.
@@ -1408,13 +1413,25 @@ public class HRegionServer extends HasThread implements
           }
           continue;
         }
         String value = e.getValue();
+        if (key.equals(HConstants.HBASE_DIR)) {
+          if (value != null && !value.equals(conf.get(HConstants.HBASE_DIR))) {
+            updateRootDir = true;
+          }
+        }
         if (LOG.isDebugEnabled()) {
-          LOG.info("Config from master: " + key + "=" + value);
+          LOG.debug("Config from master: " + key + "=" + value);
         }
         this.conf.set(key, value);
       }
+      if (updateRootDir) {
+        // initialize file system by the config fs.defaultFS and hbase.rootdir from master
+        initializeFileSystem();
+      }
       // hack! Maps DFSClient => RegionServer for logs. HDFS made this
       // config param for task trackers, but we can piggyback off of it.
       if (this.conf.get("mapreduce.task.attempt.id") == null) {