From eb99b0a9a2960f27eb66acfe941c4b63d164bfce Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Wed, 7 Oct 2009 01:16:15 +0000
Subject: [PATCH] HBASE-1887 Update hbase trunk to latest on hadoop 0.21
 branch so we can all test sync/append

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@822548 13f79535-47bb-0310-9956-ffa450edef68
---
 src/contrib/build-contrib.xml                      |  2 +-
 .../hbase/stargate/MiniClusterTestCase.java        |  2 +-
 .../apache/hadoop/hbase/io/hfile/HFile.java        |  2 +-
 .../hbase/mapreduce/IndexRecordWriter.java         |  8 +--
 .../apache/hadoop/hbase/master/HMaster.java        | 10 +++-
 .../hbase/regionserver/HRegionServer.java          | 59 ++++---------------
 .../hadoop/hbase/regionserver/Store.java           |  6 +-
 .../hadoop/hbase/HBaseClusterTestCase.java         | 28 +++++++--
 .../hadoop/hbase/PerformanceEvaluation.java        |  2 +-
 .../hadoop/hbase/util/TestMergeTool.java           |  2 +-
 10 files changed, 56 insertions(+), 65 deletions(-)

diff --git a/src/contrib/build-contrib.xml b/src/contrib/build-contrib.xml
index 1339ff1e04e..12f3ac36c58 100644
--- a/src/contrib/build-contrib.xml
+++ b/src/contrib/build-contrib.xml
@@ -267,7 +267,7 @@
   -->
-
+
diff --git a/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java b/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java
index b802a4b9b5a..f6067adf792 100644
--- a/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java
+++ b/src/contrib/stargate/src/test/org/apache/hadoop/hbase/stargate/MiniClusterTestCase.java
@@ -86,7 +86,7 @@ public class MiniClusterTestCase extends TestCase {
     testDir = new File(path.toString());
     dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
     FileSystem filesystem = dfsCluster.getFileSystem();
-    conf.set("fs.default.name", filesystem.getUri().toString());
+    conf.set("fs.defaultFS", filesystem.getUri().toString());
     Path parentdir = filesystem.getHomeDirectory();
     conf.set(HConstants.HBASE_DIR, parentdir.toString());
     filesystem.mkdirs(parentdir);
diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 941f8b1c8e7..11aeb20302b 100644
--- a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -1663,7 +1663,7 @@ public class HFile {
       boolean checkFamily = cmd.hasOption("a");
       // get configuration, file system and get list of files
       HBaseConfiguration conf = new HBaseConfiguration();
-      conf.set("fs.default.name",
+      conf.set("fs.defaultFS",
         conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
       FileSystem fs = FileSystem.get(conf);
       ArrayList<String> files = new ArrayList<String>();
diff --git a/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java b/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java
index 00928314ab7..53e3ccfa0c9 100644
--- a/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java
+++ b/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java
@@ -136,12 +136,8 @@ extends RecordWriter<ImmutableBytesWritable, LuceneDocumentWrapper> {
      */
     @Override
     public void run() {
-      try {
-        context.setStatus("Closing");
-      } catch (IOException e) {
-        return;
-      }
-      while (!closed) {
+      context.setStatus("Closing");
+      while (!closed) {
         try {
           context.progress();
           Thread.sleep(1000);
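
The hunks above and below all make the same substitution: the deprecated
fs.default.name key becomes fs.defaultFS, the name the hadoop 0.21 branch
uses for the default filesystem URI. A minimal standalone sketch of the
idiom, not part of the patch (the hdfs://localhost:8020 URI is a
placeholder, not a value taken from this change):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class DefaultFsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "fs.defaultFS" supersedes the deprecated "fs.default.name";
        // both name the URI of the default filesystem.
        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        // FileSystem.get(conf) resolves the default filesystem from that key.
        FileSystem fs = FileSystem.get(conf);
        System.out.println("default filesystem: " + fs.getUri());
      }
    }
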
diff --git a/src/java/org/apache/hadoop/hbase/master/HMaster.java b/src/java/org/apache/hadoop/hbase/master/HMaster.java
index 4e7117530c0..e36bc95a4a1 100644
--- a/src/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -189,8 +189,8 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
     }
     this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
     // The filesystem hbase wants to use is probably not what is set into
-    // fs.default.name; its value is probably the default.
-    this.conf.set("fs.default.name", this.rootdir.toString());
+    // fs.defaultFS; its value is probably the default.
+    this.conf.set("fs.defaultFS", this.rootdir.toString());
     this.fs = FileSystem.get(conf);
     if (this.fs instanceof DistributedFileSystem) {
       // Make sure dfs is not in safe mode
@@ -594,6 +594,10 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
   private void splitLogAfterStartup() throws IOException {
     Path logsDirPath =
       new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
+    if (!this.fs.exists(logsDirPath)) {
+      if (!this.fs.mkdirs(logsDirPath))
+        throw new IOException("Failed create of " + logsDirPath);
+    }
     FileStatus [] logFolders = this.fs.listStatus(logsDirPath);
     if (logFolders == null || logFolders.length == 0) {
       LOG.debug("No log files to split, proceeding...");
@@ -706,7 +710,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
       mw.put(new Text("hbase.regionserver.address"),
         new Text(rsAddress));
     }
-    return addConfig(mw, "fs.default.name");
+    return addConfig(mw, "fs.defaultFS");
   }

   private MapWritable addConfig(final MapWritable mw, final String key) {
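
The guard added to splitLogAfterStartup keeps FileSystem.listStatus from
failing when the log directory has never been created. The same
create-if-missing pattern appears again in Store.java further down; as a
standalone sketch (DirUtil and ensureExists are illustrative names, not
from the patch):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class DirUtil {
      private DirUtil() {}

      // Create dir if it is missing; mkdirs returns false on failure,
      // so a false return is turned into an IOException here.
      public static void ensureExists(FileSystem fs, Path dir)
      throws IOException {
        if (!fs.exists(dir) && !fs.mkdirs(dir)) {
          throw new IOException("Failed create of " + dir);
        }
      }
    }
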
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index b5b6f112f5b..c7f0fb87f66 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -25,7 +25,6 @@ import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryUsage;
 import java.lang.management.RuntimeMXBean;
 import java.lang.reflect.Constructor;
-import java.lang.reflect.Field;
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -759,18 +758,18 @@ public class HRegionServer implements HConstants, HRegionInterface,
       this.serverInfo.setServerAddress(hsa);
     }
     // Master sent us hbase.rootdir to use. Should be fully qualified
-    // path with file system specification included. Set 'fs.default.name'
+    // path with file system specification included. Set 'fs.defaultFS'
     // to match the filesystem on hbase.rootdir else underlying hadoop hdfs
     // accessors will be going against wrong filesystem (unless all is set
     // to defaults).
-    this.conf.set("fs.default.name", this.conf.get("hbase.rootdir"));
+    this.conf.set("fs.defaultFS", this.conf.get("hbase.rootdir"));
     this.fs = FileSystem.get(this.conf);

     // Register shutdown hook for HRegionServer, runs an orderly shutdown
     // when a kill signal is received
     Runtime.getRuntime().addShutdownHook(new ShutdownThread(this,
       Thread.currentThread()));
-    this.hdfsShutdownThread = suppressHdfsShutdownHook();
+    this.conf.setBoolean("fs.automatic.close", false);

     this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
     this.hlog = setupHLog();
@@ -983,16 +982,21 @@

     @Override
     public void run() {
-      LOG.info("Starting shutdown thread.");
+      LOG.info("Starting shutdown thread");

       // tell the region server to stop
-      instance.stop();
+      this.instance.stop();

       // Wait for main thread to exit.
-      Threads.shutdown(mainThread);
+      Threads.shutdown(this.mainThread);
+      try {
+        FileSystem.closeAll();
+      } catch (IOException e) {
+        e.printStackTrace();
+      }

       LOG.info("Shutdown thread complete");
-    }
+    }
   }

   // We need to call HDFS shutdown when we are done shutting down
@@ -1029,43 +1033,6 @@ public class HRegionServer implements HConstants, HRegionInterface,
       }
     }
   }
-
-  /**
-   * So, HDFS caches FileSystems so when you call FileSystem.get it's fast. In
-   * order to make sure things are cleaned up, it also creates a shutdown hook
-   * so that all filesystems can be closed when the process is terminated. This
-   * conveniently runs concurrently with our own shutdown handler, and
-   * therefore causes all the filesystems to be closed before the server can do
-   * all its necessary cleanup.
-   *
-   * The crazy dirty reflection in this method sneaks into the FileSystem cache
-   * and grabs the shutdown hook, removes it from the list of active shutdown
-   * hooks, and hangs onto it until later. Then, after we're properly done with
-   * our graceful shutdown, we can execute the hdfs hook manually to make sure
-   * loose ends are tied up.
-   *
-   * This seems quite fragile and susceptible to breaking if Hadoop changes
-   * anything about the way this cleanup is managed. Keep an eye on things.
-   */
-  private Thread suppressHdfsShutdownHook() {
-    try {
-      Field field = FileSystem.class.getDeclaredField ("clientFinalizer");
-      field.setAccessible(true);
-      Thread hdfsClientFinalizer = (Thread)field.get(null);
-      if (hdfsClientFinalizer == null) {
-        throw new RuntimeException("client finalizer is null, can't suppress!");
-      }
-      Runtime.getRuntime().removeShutdownHook(hdfsClientFinalizer);
-      return hdfsClientFinalizer;
-
-    } catch (NoSuchFieldException nsfe) {
-      LOG.fatal("Couldn't find field 'clientFinalizer' in FileSystem!", nsfe);
-      throw new RuntimeException("Failed to suppress HDFS shutdown hook");
-    } catch (IllegalAccessException iae) {
-      LOG.fatal("Couldn't access field 'clientFinalizer' in FileSystem!", iae);
-      throw new RuntimeException("Failed to suppress HDFS shutdown hook");
-    }
-  }

   /**
    * Report the status of the server. A server is online once all the startup
@@ -2540,4 +2507,4 @@ public class HRegionServer implements HConstants, HRegionInterface,

     doMain(args, regionServerClass);
   }
-}
\ No newline at end of file
+}
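
The reflection hack deleted above is replaced by two lines: during startup
fs.automatic.close is set false so the 0.21-era FileSystem cache never
installs its own shutdown hook, and the region server's ShutdownThread
calls FileSystem.closeAll() itself once the orderly shutdown is done. A
self-contained sketch of that ordering, not the server code itself (the
server cleanup body is elided; OrderedShutdown is an illustrative name):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class OrderedShutdown {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Ask FileSystem not to register its own shutdown hook; we will
        // close cached filesystems ourselves, after our own cleanup.
        conf.setBoolean("fs.automatic.close", false);
        FileSystem.get(conf);

        Runtime.getRuntime().addShutdownHook(new Thread() {
          @Override
          public void run() {
            // ... orderly server cleanup runs here, filesystems still open ...
            try {
              FileSystem.closeAll(); // close every cached filesystem last
            } catch (IOException e) {
              e.printStackTrace();
            }
          }
        });
      }
    }
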
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/Store.java b/src/java/org/apache/hadoop/hbase/regionserver/Store.java
index 3ce047b96a9..95c0bbec8cd 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -162,11 +162,15 @@ public class Store implements HConstants, HeapSize {
       final Progressable reporter)
   throws IOException {
     HRegionInfo info = region.regionInfo;
+    this.fs = fs;
     this.homedir = getStoreHomedir(basedir, info.getEncodedName(),
       family.getName());
+    if (!this.fs.exists(this.homedir)) {
+      if (!this.fs.mkdirs(this.homedir))
+        throw new IOException("Failed create of: " + this.homedir.toString());
+    }
     this.region = region;
     this.family = family;
-    this.fs = fs;
     this.conf = conf;
     this.blockcache = family.isBlockCacheEnabled();
     this.blocksize = family.getBlocksize();
diff --git a/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java b/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
index 30fdbed39f7..7b438355c68 100644
--- a/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
+++ b/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
@@ -115,17 +115,37 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
       // continue
     }

+  /*
+   * Create dir and set its value into configuration.
+   * @param key Create dir under test for this key.  Set its fully-qualified
+   * value into the conf.
+   * @throws IOException
+   */
+  private void setupDFSConfig(final String key) throws IOException {
+    Path basedir =
+      new Path(this.conf.get(TEST_DIRECTORY_KEY, "test/build/data"));
+    FileSystem fs = FileSystem.get(this.conf);
+    Path dir = fs.makeQualified(new Path(basedir, key));
+    // Delete if exists.  May contain data from old tests.
+    if (fs.exists(dir)) if (!fs.delete(dir, true)) throw new IOException("Delete: " + dir);
+    if (!fs.mkdirs(dir)) throw new IOException("Create: " + dir);
+    this.conf.set(key, dir.toString());
+  }
+
   @Override
   protected void setUp() throws Exception {
     try {
-      if (startDfs) {
-        // start up the dfs
-        dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      if (this.startDfs) {
+        /*
+        setupDFSConfig("dfs.namenode.name.dir");
+        setupDFSConfig("dfs.datanode.data.dir");
+        */
+        this.dfsCluster = new MiniDFSCluster(this.conf, 2, true, null);

         // mangle the conf so that the fs parameter points to the minidfs we
         // just started up
         FileSystem filesystem = dfsCluster.getFileSystem();
-        conf.set("fs.default.name", filesystem.getUri().toString());
+        conf.set("fs.defaultFS", filesystem.getUri().toString());
         Path parentdir = filesystem.getHomeDirectory();
         conf.set(HConstants.HBASE_DIR, parentdir.toString());
         filesystem.mkdirs(parentdir);
diff --git a/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java b/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
index e23b20c8758..8b1e8ce94d6 100644
--- a/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -862,7 +862,7 @@ public class PerformanceEvaluation implements HConstants {
     // mangle the conf so that the fs parameter points to the minidfs we
     // just started up
     FileSystem fs = dfsCluster.getFileSystem();
-    conf.set("fs.default.name", fs.getUri().toString());
+    conf.set("fs.defaultFS", fs.getUri().toString());
     Path parentdir = fs.getHomeDirectory();
     conf.set(HConstants.HBASE_DIR, parentdir.toString());
     fs.mkdirs(parentdir);
diff --git a/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java b/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
index 1270f563f34..25b0efe7e3b 100644
--- a/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ b/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -112,7 +112,7 @@ public class TestMergeTool extends HBaseTestCase {
     // Start up dfs
     this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
     this.fs = this.dfsCluster.getFileSystem();
-    conf.set("fs.default.name", fs.getUri().toString());
+    conf.set("fs.defaultFS", fs.getUri().toString());
     Path parentdir = fs.getHomeDirectory();
     conf.set(HConstants.HBASE_DIR, parentdir.toString());
     fs.mkdirs(parentdir);
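
All three test files above share the same bring-up pattern: start a
MiniDFSCluster, then point fs.defaultFS and hbase.rootdir at it so the
code under test resolves the mini cluster instead of the local
filesystem. Extracted as a standalone sketch, assuming the hadoop core
and test jars are on the classpath (MiniDfsSetup is an illustrative
name, not from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSetup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two datanodes, formatted namenode, default racks.
        MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, 2, true, null);
        FileSystem fs = dfsCluster.getFileSystem();
        // Mangle the conf so everything resolves the mini cluster.
        conf.set("fs.defaultFS", fs.getUri().toString());
        Path parentdir = fs.getHomeDirectory();
        conf.set("hbase.rootdir", parentdir.toString()); // HConstants.HBASE_DIR
        fs.mkdirs(parentdir);
        // ... run tests against conf ...
        dfsCluster.shutdown();
      }
    }
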