HBASE-1887 Update hbase trunk to latests on hadoop 0.21 branch so we can all test sync/append; some more fixes

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@822596 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2009-10-07 05:52:55 +00:00
parent b853339319
commit f52749cacd
6 changed files with 34 additions and 81 deletions

View File

@ -28,13 +28,12 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This class creates a single process HBase cluster. One thread is created for
@ -249,18 +248,14 @@ public class LocalHBaseCluster implements HConstants {
/**
* Shut down the mini HBase cluster
* @throws IOException
*/
public void shutdown() {
public void shutdown() throws IOException {
LOG.debug("Shutting down HBase Cluster");
// Be careful how the hdfs shutdown thread runs in context where more than
// one regionserver in the mix.
Thread shutdownThread = null;
// Be careful about how we shutdown hdfs.
synchronized (this.regionThreads) {
for (RegionServerThread t: this.regionThreads) {
Thread tt = t.getRegionServer().setHDFSShutdownThreadOnExit(null);
if (shutdownThread == null && tt != null) {
shutdownThread = tt;
}
t.getRegionServer().setShutdownHDFS(false);
}
}
if(this.master != null) {
@ -291,7 +286,7 @@ public class LocalHBaseCluster implements HConstants {
}
}
}
Threads.shutdown(shutdownThread);
FileSystem.closeAll();
LOG.info("Shutdown " +
((this.regionThreads != null)? this.master.getName(): "0 masters") +
" " + this.regionThreads.size() + " region server(s)");

View File

@ -595,7 +595,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
Path logsDirPath =
new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
if (!this.fs.exists(logsDirPath)) {
if (this.fs.mkdirs(logsDirPath))
if (!this.fs.mkdirs(logsDirPath))
throw new IOException("Failed create of " + logsDirPath);
}
FileStatus [] logFolders = this.fs.listStatus(logsDirPath);

View File

@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegionServer.ToDoEntry;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.util.StringUtils;
@ -203,14 +202,14 @@ class CompactSplitThread extends Thread implements HConstants {
oldRegionInfo.setSplit(true);
// Inform the HRegionServer that the parent HRegion is no-longer online.
this.server.removeFromOnlineRegions(oldRegionInfo);
Put put = new Put(oldRegionInfo.getRegionName());
put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER,
Writables.getBytes(oldRegionInfo));
Writables.getBytes(oldRegionInfo));
put.add(CATALOG_FAMILY, SPLITA_QUALIFIER,
Writables.getBytes(newRegions[0].getRegionInfo()));
Writables.getBytes(newRegions[0].getRegionInfo()));
put.add(CATALOG_FAMILY, SPLITB_QUALIFIER,
Writables.getBytes(newRegions[1].getRegionInfo()));
Writables.getBytes(newRegions[1].getRegionInfo()));
t.put(put);
// Add new regions to META

View File

@ -228,7 +228,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
// The main region server thread.
private Thread regionServerThread;
// Run HDFS shutdown thread on exit if this is set. We clear this out when
// Run HDFS shutdown on exit if this is set. We clear this out when
// doing a restart() to prevent closing of HDFS.
private final AtomicBoolean shutdownHDFS = new AtomicBoolean(true);
@ -672,11 +672,6 @@ public class HRegionServer implements HConstants, HRegionInterface,
zooKeeperWrapper.close();
if (shutdownHDFS.get()) {
runThread(this.hdfsShutdownThread,
this.conf.getLong("hbase.dfs.shutdown.wait", 30000));
}
LOG.info(Thread.currentThread().getName() + " exiting");
}
@ -708,31 +703,6 @@ public class HRegionServer implements HConstants, HRegionInterface,
return null;
}
/**
* Run and wait on passed thread in HRS context.
* @param t
* @param dfsShutdownWait
*/
public void runThread(final Thread t, final long dfsShutdownWait) {
if (t == null) {
return;
}
t.start();
Threads.shutdown(t, dfsShutdownWait);
}
/**
* Set the hdfs shutdown thread to run on exit. Pass null to disable
* running of the shutdown test. Needed by tests.
* @param t Thread to run. Pass null to disable tests.
* @return Previous occupant of the shutdown thread position.
*/
public Thread setHDFSShutdownThreadOnExit(final Thread t) {
Thread old = this.hdfsShutdownThread;
this.hdfsShutdownThread = t;
return old;
}
/*
* Run init. Sets up hlog and starts up all server threads.
* @param c Extra configuration.
@ -766,9 +736,9 @@ public class HRegionServer implements HConstants, HRegionInterface,
this.fs = FileSystem.get(this.conf);
// Register shutdown hook for HRegionServer, runs an orderly shutdown
// when a kill signal is received
// when a kill signal is received. Shuts down hdfs too if it's supposed to.
Runtime.getRuntime().addShutdownHook(new ShutdownThread(this,
Thread.currentThread()));
Thread.currentThread(), this.shutdownHDFS));
this.conf.setBoolean("fs.automatic.close", false);
this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
@ -786,6 +756,12 @@ public class HRegionServer implements HConstants, HRegionInterface,
}
}
public void setShutdownHDFS(final boolean b) {
this.shutdownHDFS.set(b);
}
public boolean getShutdownHDFS() {return this.shutdownHDFS.get();}
/*
* @param r Region to get RegionLoad for.
* @return RegionLoad instance.
@ -970,14 +946,18 @@ public class HRegionServer implements HConstants, HRegionInterface,
private static class ShutdownThread extends Thread {
private final HRegionServer instance;
private final Thread mainThread;
private final AtomicBoolean shutdownHDFS;
/**
* @param instance
* @param mainThread
* @param shutdownHDFS
*/
public ShutdownThread(HRegionServer instance, Thread mainThread) {
public ShutdownThread(final HRegionServer instance, final Thread mainThread,
final AtomicBoolean shutdownHDFS) {
this.instance = instance;
this.mainThread = mainThread;
this.shutdownHDFS = shutdownHDFS;
}
@Override
@ -990,7 +970,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
// Wait for main thread to exit.
Threads.shutdown(this.mainThread);
try {
FileSystem.closeAll();
if (this.shutdownHDFS.get()) FileSystem.closeAll();
} catch (IOException e) {
e.printStackTrace();
}
@ -999,9 +979,6 @@ public class HRegionServer implements HConstants, HRegionInterface,
}
}
// We need to call HDFS shutdown when we are done shutting down
private Thread hdfsShutdownThread;
/*
* Inner class that runs on a long period checking if regions need major
* compaction.

View File

@ -115,32 +115,13 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
// continue
}
/*
* Create dir and set its value into configuration.
* @param key Create dir under test for this key. Set its fully-qualified
* value into the conf.
* @throws IOException
*/
private void setupDFSConfig(final String key) throws IOException {
Path basedir =
new Path(this.conf.get(TEST_DIRECTORY_KEY, "test/build/data"));
FileSystem fs = FileSystem.get(this.conf);
Path dir = fs.makeQualified(new Path(basedir, key));
// Delete if exists. May contain data from old tests.
if (fs.exists(dir)) if (!fs.delete(dir, true)) throw new IOException("Delete: " + dir);
if (!fs.mkdirs(dir)) throw new IOException("Create: " + dir);
this.conf.set(key, dir.toString());
}
@Override
protected void setUp() throws Exception {
try {
if (this.startDfs) {
/*
setupDFSConfig("dfs.namenode.name.dir");
setupDFSConfig("dfs.datanode.data.dir");
*/
this.dfsCluster = new MiniDFSCluster(this.conf, 2, true, null);
// This spews a bunch of warnings about missing scheme. TODO: fix.
this.dfsCluster = new MiniDFSCluster(0, this.conf, 2, true, true, true,
null, null, null, null);
// mangle the conf so that the fs parameter points to the minidfs we
// just started up

View File

@ -147,7 +147,7 @@ public class MiniHBaseCluster implements HConstants {
LOG.info("Stopping " + server.toString());
if (!shutdownFS) {
// Stop the running of the hdfs shutdown thread in tests.
server.getRegionServer().setHDFSShutdownThreadOnExit(null);
server.getRegionServer().setShutdownHDFS(false);
}
server.getRegionServer().stop();
return server;
@ -172,8 +172,9 @@ public class MiniHBaseCluster implements HConstants {
/**
* Shut down the mini HBase cluster
* @throws IOException
*/
public void shutdown() {
public void shutdown() throws IOException {
if (this.hbaseCluster != null) {
this.hbaseCluster.shutdown();
}