HBASE-1887 Update hbase trunk to latest on hadoop 0.21 branch so we can all test sync/append

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@822548 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2009-10-07 01:16:15 +00:00
parent e5c8531001
commit eb99b0a9a2
10 changed files with 56 additions and 65 deletions
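The recurring change across these files is the Hadoop 0.21 rename of the default-filesystem key from fs.default.name to fs.defaultFS, pointed at hbase.rootdir so that FileSystem.get() resolves to the filesystem HBase actually uses. A minimal sketch of that pattern on a plain Hadoop Configuration (the file:/// fallback is illustrative, not from this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hadoop 0.21 reads the default filesystem from fs.defaultFS rather than
    // fs.default.name; HBase sets it from hbase.rootdir so FileSystem.get()
    // returns the filesystem the cluster data lives on.
    String rootdir = conf.get("hbase.rootdir", "file:///tmp/hbase"); // fallback is illustrative
    conf.set("fs.defaultFS", rootdir);
    FileSystem fs = FileSystem.get(conf);
    System.out.println("default filesystem: " + fs.getUri());
  }
}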

View File

@@ -267,7 +267,7 @@
-->
<sysproperty key="user.dir" value="${build.test}/data"/>
<sysproperty key="fs.default.name" value="${fs.default.name}"/>
<sysproperty key="fs.defaultFS" value="${fs.default.name}"/>
<sysproperty key="hbase.test.localoutputfile" value="${hbase.test.localoutputfile}"/>
<sysproperty key="hbase.log.dir" value="${hbase.log.dir}"/>
<classpath refid="test.classpath"/>

View File

@@ -86,7 +86,7 @@ public class MiniClusterTestCase extends TestCase {
testDir = new File(path.toString());
dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
FileSystem filesystem = dfsCluster.getFileSystem();
conf.set("fs.default.name", filesystem.getUri().toString());
conf.set("fs.defaultFS", filesystem.getUri().toString());
Path parentdir = filesystem.getHomeDirectory();
conf.set(HConstants.HBASE_DIR, parentdir.toString());
filesystem.mkdirs(parentdir);

View File

@@ -1663,7 +1663,7 @@ public class HFile {
boolean checkFamily = cmd.hasOption("a");
// get configuration, file system and get list of files
HBaseConfiguration conf = new HBaseConfiguration();
conf.set("fs.default.name",
conf.set("fs.defaultFS",
conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
FileSystem fs = FileSystem.get(conf);
ArrayList<Path> files = new ArrayList<Path>();

View File

@@ -136,12 +136,8 @@ extends RecordWriter<ImmutableBytesWritable, LuceneDocumentWrapper> {
*/
@Override
public void run() {
- try {
-   context.setStatus("Closing");
- } catch (IOException e) {
-   return;
- }
+ context.setStatus("Closing");
while (!closed) {
try {
context.progress();
Thread.sleep(1000);
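The hunk above drops the try/catch around context.setStatus, presumably because the rewritten 0.21 mapreduce API no longer declares a checked IOException for it; what remains is a plain heartbeat loop that keeps the task alive while the writer closes. A rough, self-contained sketch of that loop, using a generic Runnable in place of the real TaskAttemptContext (class and method names here are illustrative, not HBase's):

// Illustrative stand-in for the record writer's close-monitor thread; the real
// code calls context.setStatus()/context.progress() on a Hadoop task context.
public class CloseMonitor extends Thread {
  private volatile boolean closed = false;
  private final Runnable heartbeat; // e.g. a call through to context.progress()

  public CloseMonitor(Runnable heartbeat) {
    this.heartbeat = heartbeat;
    setDaemon(true);
  }

  @Override
  public void run() {
    while (!closed) {
      try {
        heartbeat.run();
        Thread.sleep(1000); // report liveness roughly once a second
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  public void shutdown() {
    closed = true;
    interrupt();
  }
}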

View File

@@ -189,8 +189,8 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
}
this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
// The filesystem hbase wants to use is probably not what is set into
- // fs.default.name; its value is probably the default.
- this.conf.set("fs.default.name", this.rootdir.toString());
+ // fs.defaultFS; its value is probably the default.
+ this.conf.set("fs.defaultFS", this.rootdir.toString());
this.fs = FileSystem.get(conf);
if (this.fs instanceof DistributedFileSystem) {
// Make sure dfs is not in safe mode
@@ -594,6 +594,10 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
private void splitLogAfterStartup() throws IOException {
Path logsDirPath =
new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
+ if (!this.fs.exists(logsDirPath)) {
+   if (!this.fs.mkdirs(logsDirPath))
+     throw new IOException("Failed create of " + logsDirPath);
+ }
FileStatus [] logFolders = this.fs.listStatus(logsDirPath);
if (logFolders == null || logFolders.length == 0) {
LOG.debug("No log files to split, proceeding...");
@@ -706,7 +710,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
mw.put(new Text("hbase.regionserver.address"), new Text(rsAddress));
}
return addConfig(mw, "fs.default.name");
return addConfig(mw, "fs.defaultFS");
}
private MapWritable addConfig(final MapWritable mw, final String key) {
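The new guard in splitLogAfterStartup creates the log directory before listing it; on the 0.21 filesystems, listStatus on a missing path tends to throw FileNotFoundException rather than return null, so creating the directory up front keeps the no-logs-to-split path working. A small sketch of the idiom with illustrative names (not HBase's own helper):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Ensure a directory exists before listing it, as the HMaster hunk above does
// for the region server log directory.
final class FsDirs {
  static FileStatus[] listOrCreate(FileSystem fs, Path dir) throws IOException {
    if (!fs.exists(dir)) {
      if (!fs.mkdirs(dir)) {
        throw new IOException("Failed create of " + dir);
      }
    }
    return fs.listStatus(dir); // empty (or null on older releases) when there is nothing to split
  }
}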

View File

@@ -25,7 +25,6 @@ import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.lang.management.RuntimeMXBean;
import java.lang.reflect.Constructor;
- import java.lang.reflect.Field;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
@@ -759,18 +758,18 @@ public class HRegionServer implements HConstants, HRegionInterface,
this.serverInfo.setServerAddress(hsa);
}
// Master sent us hbase.rootdir to use. Should be fully qualified
- // path with file system specification included. Set 'fs.default.name'
+ // path with file system specification included. Set 'fs.defaultFS'
// to match the filesystem on hbase.rootdir else underlying hadoop hdfs
// accessors will be going against wrong filesystem (unless all is set
// to defaults).
this.conf.set("fs.default.name", this.conf.get("hbase.rootdir"));
this.conf.set("fs.defaultFS", this.conf.get("hbase.rootdir"));
this.fs = FileSystem.get(this.conf);
// Register shutdown hook for HRegionServer, runs an orderly shutdown
// when a kill signal is recieved
Runtime.getRuntime().addShutdownHook(new ShutdownThread(this,
Thread.currentThread()));
- this.hdfsShutdownThread = suppressHdfsShutdownHook();
+ this.conf.setBoolean("fs.automatic.close", false);
this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
this.hlog = setupHLog();
@@ -983,16 +982,21 @@ public class HRegionServer implements HConstants, HRegionInterface,
@Override
public void run() {
LOG.info("Starting shutdown thread.");
LOG.info("Starting shutdown thread");
// tell the region server to stop
instance.stop();
this.instance.stop();
// Wait for main thread to exit.
Threads.shutdown(mainThread);
Threads.shutdown(this.mainThread);
try {
FileSystem.closeAll();
} catch (IOException e) {
e.printStackTrace();
}
LOG.info("Shutdown thread complete");
}
}
}
// We need to call HDFS shutdown when we are done shutting down
@@ -1029,43 +1033,6 @@ public class HRegionServer implements HConstants, HRegionInterface,
}
}
}
- /**
-  * So, HDFS caches FileSystems so when you call FileSystem.get it's fast. In
-  * order to make sure things are cleaned up, it also creates a shutdown hook
-  * so that all filesystems can be closed when the process is terminated. This
-  * conveniently runs concurrently with our own shutdown handler, and
-  * therefore causes all the filesystems to be closed before the server can do
-  * all its necessary cleanup.
-  *
-  * The crazy dirty reflection in this method sneaks into the FileSystem cache
-  * and grabs the shutdown hook, removes it from the list of active shutdown
-  * hooks, and hangs onto it until later. Then, after we're properly done with
-  * our graceful shutdown, we can execute the hdfs hook manually to make sure
-  * loose ends are tied up.
-  *
-  * This seems quite fragile and susceptible to breaking if Hadoop changes
-  * anything about the way this cleanup is managed. Keep an eye on things.
-  */
- private Thread suppressHdfsShutdownHook() {
-   try {
-     Field field = FileSystem.class.getDeclaredField ("clientFinalizer");
-     field.setAccessible(true);
-     Thread hdfsClientFinalizer = (Thread)field.get(null);
-     if (hdfsClientFinalizer == null) {
-       throw new RuntimeException("client finalizer is null, can't suppress!");
-     }
-     Runtime.getRuntime().removeShutdownHook(hdfsClientFinalizer);
-     return hdfsClientFinalizer;
-   } catch (NoSuchFieldException nsfe) {
-     LOG.fatal("Couldn't find field 'clientFinalizer' in FileSystem!", nsfe);
-     throw new RuntimeException("Failed to suppress HDFS shutdown hook");
-   } catch (IllegalAccessException iae) {
-     LOG.fatal("Couldn't access field 'clientFinalizer' in FileSystem!", iae);
-     throw new RuntimeException("Failed to suppress HDFS shutdown hook");
-   }
- }
/**
* Report the status of the server. A server is online once all the startup
@@ -2540,4 +2507,4 @@ public class HRegionServer implements HConstants, HRegionInterface,
doMain(args, regionServerClass);
}
}
}
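The reflection hack removed above (fishing the clientFinalizer hook out of FileSystem) is replaced by two pieces visible in this file's hunks: setting fs.automatic.close to false at startup so Hadoop 0.21 does not register its own filesystem-closing shutdown hook, and calling FileSystem.closeAll() from the region server's own shutdown thread once orderly shutdown has finished. A condensed sketch of that arrangement, with the server body stubbed out:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Condensed illustration; in the commit this logic lives in HRegionServer and
// its ShutdownThread inner class.
public class ShutdownSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Keep Hadoop from installing its automatic FileSystem-closing hook so our
    // own hook decides when cached filesystems are torn down.
    conf.setBoolean("fs.automatic.close", false);
    final FileSystem fs = FileSystem.get(conf);

    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        // ... stop the server and wait for the main thread here ...
        try {
          FileSystem.closeAll(); // safe now that our cleanup is done
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    });
    // ... normal operation against fs ...
  }
}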

View File

@@ -162,11 +162,15 @@ public class Store implements HConstants, HeapSize {
final Progressable reporter)
throws IOException {
HRegionInfo info = region.regionInfo;
+ this.fs = fs;
this.homedir = getStoreHomedir(basedir, info.getEncodedName(),
family.getName());
+ if (!this.fs.exists(this.homedir)) {
+   if (!this.fs.mkdirs(this.homedir))
+     throw new IOException("Failed create of: " + this.homedir.toString());
+ }
this.region = region;
this.family = family;
- this.fs = fs;
this.conf = conf;
this.blockcache = family.isBlockCacheEnabled();
this.blocksize = family.getBlocksize();

View File

@@ -115,17 +115,37 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
// continue
}
+ /*
+  * Create dir and set its value into configuration.
+  * @param key Create dir under test for this key. Set its fully-qualified
+  * value into the conf.
+  * @throws IOException
+  */
+ private void setupDFSConfig(final String key) throws IOException {
+   Path basedir =
+     new Path(this.conf.get(TEST_DIRECTORY_KEY, "test/build/data"));
+   FileSystem fs = FileSystem.get(this.conf);
+   Path dir = fs.makeQualified(new Path(basedir, key));
+   // Delete if exists. May contain data from old tests.
+   if (fs.exists(dir)) if (!fs.delete(dir, true)) throw new IOException("Delete: " + dir);
+   if (!fs.mkdirs(dir)) throw new IOException("Create: " + dir);
+   this.conf.set(key, dir.toString());
+ }
@Override
protected void setUp() throws Exception {
try {
- if (startDfs) {
-   // start up the dfs
-   dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+ if (this.startDfs) {
+   /*
+   setupDFSConfig("dfs.namenode.name.dir");
+   setupDFSConfig("dfs.datanode.data.dir");
+   */
+   this.dfsCluster = new MiniDFSCluster(this.conf, 2, true, null);
// mangle the conf so that the fs parameter points to the minidfs we
// just started up
FileSystem filesystem = dfsCluster.getFileSystem();
conf.set("fs.default.name", filesystem.getUri().toString());
conf.set("fs.defaultFS", filesystem.getUri().toString());
Path parentdir = filesystem.getHomeDirectory();
conf.set(HConstants.HBASE_DIR, parentdir.toString());
filesystem.mkdirs(parentdir);
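The same wiring shows up in every test here that spins up a MiniDFSCluster: start the cluster, point fs.defaultFS and hbase.rootdir at it, and create the root directory. A stripped-down JUnit 3 setUp illustrating the pattern (the two-datanode count mirrors the diffs; the class and test names are illustrative):

import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsWiringTest extends TestCase {
  private MiniDFSCluster dfsCluster;
  private final Configuration conf = new Configuration();

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    dfsCluster = new MiniDFSCluster(conf, 2, true, null); // 2 datanodes, format the fs
    FileSystem fs = dfsCluster.getFileSystem();
    // Point the default filesystem and hbase.rootdir at the mini cluster we
    // just started so everything downstream talks to the same HDFS instance.
    conf.set("fs.defaultFS", fs.getUri().toString());
    Path rootdir = fs.getHomeDirectory();
    conf.set("hbase.rootdir", rootdir.toString());
    fs.mkdirs(rootdir);
  }

  @Override
  protected void tearDown() throws Exception {
    if (dfsCluster != null) dfsCluster.shutdown();
    super.tearDown();
  }

  public void testClusterIsUp() throws Exception {
    assertTrue(FileSystem.get(conf).exists(new Path("/")));
  }
}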

View File

@@ -862,7 +862,7 @@ public class PerformanceEvaluation implements HConstants {
// mangle the conf so that the fs parameter points to the minidfs we
// just started up
FileSystem fs = dfsCluster.getFileSystem();
conf.set("fs.default.name", fs.getUri().toString());
conf.set("fs.defaultFS", fs.getUri().toString());
Path parentdir = fs.getHomeDirectory();
conf.set(HConstants.HBASE_DIR, parentdir.toString());
fs.mkdirs(parentdir);

View File

@@ -112,7 +112,7 @@ public class TestMergeTool extends HBaseTestCase {
// Start up dfs
this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
this.fs = this.dfsCluster.getFileSystem();
conf.set("fs.default.name", fs.getUri().toString());
conf.set("fs.defaultFS", fs.getUri().toString());
Path parentdir = fs.getHomeDirectory();
conf.set(HConstants.HBASE_DIR, parentdir.toString());
fs.mkdirs(parentdir);