HADOOP-1966 Make HBase unit tests more reliable in the Hudson environment.

Set hbase.rootdir in test/hbase-site.xml; when running a test, the default does not work consistently.

When a HBase mini cluster is started on top of an existing mini dfs cluster, it should not shut down the mini dfs cluster when the mini HBase cluster is shut down.

TestDFSAbort catches exceptions, prints the stack trace and re-throws the exception, so you can see when the exception happened in the log.

Catch runtime exceptions that were escaping from FSUtils.isFileSystemAvailable, enabling more reliable detection of dfs failure. HRegionServer also now checks to see if it is still accepting client requests.



git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@580745 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Jim Kellerman 2007-09-30 16:09:38 +00:00
parent 354c848546
commit 8a3bc9c23e
8 changed files with 148 additions and 102 deletions

View File

@ -62,6 +62,7 @@ Trunk (unreleased changes)
down is inconsistent b) TestDFSAbort failed in nightly #242 down is inconsistent b) TestDFSAbort failed in nightly #242
HADOOP-1929 Add hbase-default.xml to hbase jar HADOOP-1929 Add hbase-default.xml to hbase jar
HADOOP-1941 StopRowFilter throws NPE when passed null row HADOOP-1941 StopRowFilter throws NPE when passed null row
HADOOP-1966 Make HBase unit tests more reliable in the Hudson environment.
IMPROVEMENTS IMPROVEMENTS
HADOOP-1737 Make HColumnDescriptor data publically members settable HADOOP-1737 Make HColumnDescriptor data publically members settable

View File

@ -71,9 +71,6 @@ public interface HConstants {
/** Used to construct the name of the directory in which a HRegion resides */ /** Used to construct the name of the directory in which a HRegion resides */
static final String HREGIONDIR_PREFIX = "hregion_"; static final String HREGIONDIR_PREFIX = "hregion_";
// TODO: Someone may try to name a column family 'log'. If they
// do, it will clash with the HREGION log dir subdirectory. FIX.
/** Used to construct the name of the log directory for a region server */ /** Used to construct the name of the log directory for a region server */
static final String HREGION_LOGDIR_NAME = "log"; static final String HREGION_LOGDIR_NAME = "log";

View File

@ -504,7 +504,7 @@ HMasterRegionInterface {
LOG.error("Scan ROOT region", e); LOG.error("Scan ROOT region", e);
if (tries == numRetries - 1) { if (tries == numRetries - 1) {
// We ran out of tries. Make sure the file system is still available // We ran out of tries. Make sure the file system is still available
if (checkFileSystem()) { if (!checkFileSystem()) {
continue; // Avoid sleeping. continue; // Avoid sleeping.
} }
} }
@ -654,7 +654,7 @@ HMasterRegionInterface {
if (tries == numRetries - 1) { if (tries == numRetries - 1) {
// We ran out of tries. Make sure the file system is still // We ran out of tries. Make sure the file system is still
// available // available
if (checkFileSystem()) { if (!checkFileSystem()) {
continue; // avoid sleeping continue; // avoid sleeping
} }
} }
@ -941,7 +941,7 @@ HMasterRegionInterface {
*/ */
protected boolean checkFileSystem() { protected boolean checkFileSystem() {
if (fsOk) { if (fsOk) {
if (!FSUtils.isFileSystemAvailable(fs, closed)) { if (!FSUtils.isFileSystemAvailable(fs)) {
LOG.fatal("Shutting down HBase cluster: file system not available"); LOG.fatal("Shutting down HBase cluster: file system not available");
closed.set(true); closed.set(true);
fsOk = false; fsOk = false;

View File

@ -139,6 +139,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
30 * 1000), stop); 30 * 1000), stop);
} }
/** {@inheritDoc} */
public void closing(final Text regionName) { public void closing(final Text regionName) {
lock.writeLock().lock(); lock.writeLock().lock();
try { try {
@ -154,6 +155,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
} }
} }
/** {@inheritDoc} */
public void closed(final Text regionName) { public void closed(final Text regionName) {
lock.writeLock().lock(); lock.writeLock().lock();
try { try {
@ -458,9 +460,17 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// get it when the master is panicing because for instance // get it when the master is panicing because for instance
// the HDFS has been yanked out from under it. Be wary of // the HDFS has been yanked out from under it. Be wary of
// this message. // this message.
if (checkFileSystem()) { try {
closeAllRegions(); if (checkFileSystem()) {
restart = true; closeAllRegions();
restart = true;
}
} catch (Exception e) {
LOG.fatal("file system available check failed. " +
"Shutting down server.", e);
this.stopRequested.set(true);
this.fsOk = false;
this.abortRequested = true;
} }
break; break;
@ -944,7 +954,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
/** {@inheritDoc} */ /** {@inheritDoc} */
public byte [] get(final Text regionName, final Text row, public byte [] get(final Text regionName, final Text row,
final Text column) throws IOException { final Text column) throws IOException {
checkOpen();
requestCount.incrementAndGet(); requestCount.incrementAndGet();
try { try {
return getRegion(regionName).get(row, column); return getRegion(regionName).get(row, column);
@ -958,7 +969,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
/** {@inheritDoc} */ /** {@inheritDoc} */
public byte [][] get(final Text regionName, final Text row, public byte [][] get(final Text regionName, final Text row,
final Text column, final int numVersions) throws IOException { final Text column, final int numVersions) throws IOException {
checkOpen();
requestCount.incrementAndGet(); requestCount.incrementAndGet();
try { try {
return getRegion(regionName).get(row, column, numVersions); return getRegion(regionName).get(row, column, numVersions);
@ -972,7 +984,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
/** {@inheritDoc} */ /** {@inheritDoc} */
public byte [][] get(final Text regionName, final Text row, final Text column, public byte [][] get(final Text regionName, final Text row, final Text column,
final long timestamp, final int numVersions) throws IOException { final long timestamp, final int numVersions) throws IOException {
checkOpen();
requestCount.incrementAndGet(); requestCount.incrementAndGet();
try { try {
return getRegion(regionName).get(row, column, timestamp, numVersions); return getRegion(regionName).get(row, column, timestamp, numVersions);
@ -986,7 +999,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
/** {@inheritDoc} */ /** {@inheritDoc} */
public MapWritable getRow(final Text regionName, final Text row) public MapWritable getRow(final Text regionName, final Text row)
throws IOException { throws IOException {
checkOpen();
requestCount.incrementAndGet(); requestCount.incrementAndGet();
try { try {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
@ -1006,7 +1020,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
/** {@inheritDoc} */ /** {@inheritDoc} */
public MapWritable next(final long scannerId) throws IOException { public MapWritable next(final long scannerId) throws IOException {
checkOpen();
requestCount.incrementAndGet(); requestCount.incrementAndGet();
try { try {
String scannerName = String.valueOf(scannerId); String scannerName = String.valueOf(scannerId);
@ -1044,7 +1059,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
/** {@inheritDoc} */ /** {@inheritDoc} */
public void batchUpdate(Text regionName, long timestamp, BatchUpdate b) public void batchUpdate(Text regionName, long timestamp, BatchUpdate b)
throws IOException { throws IOException {
checkOpen();
requestCount.incrementAndGet(); requestCount.incrementAndGet();
// If timestamp == LATEST_TIMESTAMP and we have deletes, then they need // If timestamp == LATEST_TIMESTAMP and we have deletes, then they need
// special treatment. For these we need to first find the latest cell so // special treatment. For these we need to first find the latest cell so
@ -1093,9 +1110,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// remote scanner interface // remote scanner interface
// //
/** {@inheritDoc} */
public long openScanner(Text regionName, Text[] cols, Text firstRow, public long openScanner(Text regionName, Text[] cols, Text firstRow,
final long timestamp, final RowFilterInterface filter) final long timestamp, final RowFilterInterface filter)
throws IOException { throws IOException {
checkOpen();
requestCount.incrementAndGet(); requestCount.incrementAndGet();
try { try {
HRegion r = getRegion(regionName); HRegion r = getRegion(regionName);
@ -1110,7 +1130,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
leases.createLease(scannerId, scannerId, new ScannerListener(scannerName)); leases.createLease(scannerId, scannerId, new ScannerListener(scannerName));
return scannerId; return scannerId;
} catch (IOException e) { } catch (IOException e) {
LOG.error("Opening scanner (fsOk: " + this.fsOk + ")", LOG.error("Error opening scanner (fsOk: " + this.fsOk + ")",
RemoteExceptionHandler.checkIOException(e)); RemoteExceptionHandler.checkIOException(e));
checkFileSystem(); checkFileSystem();
throw e; throw e;
@ -1119,6 +1139,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
/** {@inheritDoc} */ /** {@inheritDoc} */
public void close(final long scannerId) throws IOException { public void close(final long scannerId) throws IOException {
checkOpen();
requestCount.incrementAndGet(); requestCount.incrementAndGet();
try { try {
String scannerName = String.valueOf(scannerId); String scannerName = String.valueOf(scannerId);
@ -1254,6 +1275,20 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
} }
} }
/**
* Called to verify that this server is up and running.
*
* @throws IOException
*/
private void checkOpen() throws IOException {
if (stopRequested.get() || abortRequested) {
throw new IOException("Server not running");
}
if (!fsOk) {
throw new IOException("File system not available");
}
}
/** /**
* Checks to see if the file system is still accessible. * Checks to see if the file system is still accessible.
* If not, sets abortRequested and stopRequested * If not, sets abortRequested and stopRequested
@ -1265,10 +1300,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
FileSystem fs = null; FileSystem fs = null;
try { try {
fs = FileSystem.get(this.conf); fs = FileSystem.get(this.conf);
} catch (IOException e) { if (fs != null && !FSUtils.isFileSystemAvailable(fs)) {
LOG.fatal("Shutting down HRegionServer: file system not available");
this.abortRequested = true;
this.stopRequested.set(true);
fsOk = false;
}
} catch (Exception e) {
LOG.error("Failed get of filesystem", e); LOG.error("Failed get of filesystem", e);
}
if (fs != null && !FSUtils.isFileSystemAvailable(fs, stopRequested)) {
LOG.fatal("Shutting down HRegionServer: file system not available"); LOG.fatal("Shutting down HRegionServer: file system not available");
this.abortRequested = true; this.abortRequested = true;
this.stopRequested.set(true); this.stopRequested.set(true);
@ -1301,6 +1340,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
return regionsToCheck; return regionsToCheck;
} }
/** {@inheritDoc} */
public long getProtocolVersion(final String protocol, public long getProtocolVersion(final String protocol,
@SuppressWarnings("unused") final long clientVersion) @SuppressWarnings("unused") final long clientVersion)
throws IOException { throws IOException {

View File

@ -26,7 +26,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.dfs.DistributedFileSystem; import org.apache.hadoop.dfs.DistributedFileSystem;
/** /**
@ -38,48 +37,37 @@ public class FSUtils {
/** /**
* Not instantiable * Not instantiable
*/ */
private FSUtils() {super();} private FSUtils() {}
/** /**
* Checks to see if the specified file system is available * Checks to see if the specified file system is available
* *
* @param fs * @param fs
* @param closed Optional flag. If non-null and set, will abort test of
* filesytem. Presumption is a flag shared by multiple threads. Another
* may have already determined the filesystem -- or something else -- bad.
* @return true if the specified file system is available. * @return true if the specified file system is available.
*/ */
public static boolean isFileSystemAvailable(final FileSystem fs, public static boolean isFileSystemAvailable(final FileSystem fs) {
final AtomicBoolean closed) {
if (!(fs instanceof DistributedFileSystem)) { if (!(fs instanceof DistributedFileSystem)) {
return true; return true;
} }
String exception = "";
boolean available = false; boolean available = false;
DistributedFileSystem dfs = (DistributedFileSystem) fs; DistributedFileSystem dfs = (DistributedFileSystem) fs;
int maxTries = dfs.getConf().getInt("hbase.client.retries.number", 3); try {
Path root = if (dfs.exists(new Path("/"))) {
fs.makeQualified(new Path(dfs.getConf().get(HConstants.HBASE_DIR, "/"))); available = true;
for (int i = 0; i < maxTries && (closed == null || !closed.get()); i++) {
IOException ex = null;
try {
if (dfs.exists(root)) {
available = true;
break;
}
} catch (IOException e) {
ex = e;
} }
String exception = (ex == null)? "": ": " + ex.getMessage(); } catch (IOException e) {
LOG.info("Failed exists test on " + root + " by thread " + exception = e.getMessage();
Thread.currentThread().getName() + " (Attempt " + i + " of " +
maxTries +"): " + exception);
} }
LOG.info("Failed file system available test. Thread: " +
Thread.currentThread().getName() + ": " + exception);
try { try {
if (!available) { if (!available) {
fs.close(); fs.close();
} }
} catch (IOException e) { } catch (Exception e) {
LOG.error("file system close failed: ", e); LOG.error("file system close failed: ", e);
} }
return available; return available;

View File

@ -75,4 +75,8 @@
the master will notice a dead region server sooner. The default is 15 seconds. the master will notice a dead region server sooner. The default is 15 seconds.
</description> </description>
</property> </property>
<property>
<name>hbase.rootdir</name>
<value>/hbase</value>
<description>location of HBase instance in dfs</description></property>
</configuration> </configuration>

View File

@ -33,14 +33,14 @@ import org.apache.log4j.Logger;
/** /**
* This class creates a single process HBase cluster for junit testing. * This class creates a single process HBase cluster for junit testing.
* One thread is created for each server. * One thread is created for each server.
* *
* <p>TestCases do not need to subclass to start a HBaseCluster. Call * <p>TestCases do not need to subclass to start a HBaseCluster. Call
* {@link #startMaster(Configuration)} and * {@link #startMaster(Configuration)} and
* {@link #startRegionServers(Configuration, int)} to startup master and * {@link #startRegionServers(Configuration, int)} to startup master and
* region servers. Save off the returned values and pass them to * region servers. Save off the returned values and pass them to
* {@link #shutdown(org.apache.hadoop.hbase.MiniHBaseCluster.MasterThread, List)} * {@link #shutdown(org.apache.hadoop.hbase.MiniHBaseCluster.MasterThread, List)}
* to shut it all down when done. * to shut it all down when done.
* *
*/ */
public class MiniHBaseCluster implements HConstants { public class MiniHBaseCluster implements HConstants {
static final Logger LOG = static final Logger LOG =
@ -48,6 +48,7 @@ public class MiniHBaseCluster implements HConstants {
private Configuration conf; private Configuration conf;
private MiniDFSCluster cluster; private MiniDFSCluster cluster;
private FileSystem fs; private FileSystem fs;
private boolean shutdownDFS;
private Path parentdir; private Path parentdir;
private MasterThread masterThread = null; private MasterThread masterThread = null;
ArrayList<RegionServerThread> regionThreads = ArrayList<RegionServerThread> regionThreads =
@ -56,21 +57,21 @@ public class MiniHBaseCluster implements HConstants {
/** /**
* Starts a MiniHBaseCluster on top of a new MiniDFSCluster * Starts a MiniHBaseCluster on top of a new MiniDFSCluster
* *
* @param conf * @param conf
* @param nRegionNodes * @param nRegionNodes
* @throws IOException * @throws IOException
*/ */
public MiniHBaseCluster(Configuration conf, int nRegionNodes) public MiniHBaseCluster(Configuration conf, int nRegionNodes)
throws IOException { throws IOException {
this(conf, nRegionNodes, true, true, true); this(conf, nRegionNodes, true, true, true);
} }
/** /**
* Start a MiniHBaseCluster. Use the native file system unless * Start a MiniHBaseCluster. Use the native file system unless
* miniHdfsFilesystem is set to true. * miniHdfsFilesystem is set to true.
* *
* @param conf * @param conf
* @param nRegionNodes * @param nRegionNodes
* @param miniHdfsFilesystem * @param miniHdfsFilesystem
@ -83,14 +84,20 @@ public class MiniHBaseCluster implements HConstants {
/** /**
* Starts a MiniHBaseCluster on top of an existing HDFSCluster * Starts a MiniHBaseCluster on top of an existing HDFSCluster
* *
* Note that if you use this constructor, you should shut down the mini dfs ****************************************************************************
* cluster in your test case. * * * * * * N O T E * * * * *
* *
* If you use this constructor, you should shut down the mini dfs cluster
* in your test case.
*
* * * * * * N O T E * * * * *
****************************************************************************
*
* @param conf * @param conf
* @param nRegionNodes * @param nRegionNodes
* @param dfsCluster * @param dfsCluster
* @throws IOException * @throws IOException
*/ */
public MiniHBaseCluster(Configuration conf, int nRegionNodes, public MiniHBaseCluster(Configuration conf, int nRegionNodes,
MiniDFSCluster dfsCluster) throws IOException { MiniDFSCluster dfsCluster) throws IOException {
@ -98,6 +105,7 @@ public class MiniHBaseCluster implements HConstants {
this.conf = conf; this.conf = conf;
this.fs = dfsCluster.getFileSystem(); this.fs = dfsCluster.getFileSystem();
this.cluster = dfsCluster; this.cluster = dfsCluster;
this.shutdownDFS = false;
init(nRegionNodes); init(nRegionNodes);
} }
@ -110,17 +118,19 @@ public class MiniHBaseCluster implements HConstants {
* filesystem configured in <code>conf</code>. * filesystem configured in <code>conf</code>.
* @param format the mini hdfs cluster * @param format the mini hdfs cluster
* @param deleteOnExit clean up mini hdfs files * @param deleteOnExit clean up mini hdfs files
* @throws IOException * @throws IOException
*/ */
public MiniHBaseCluster(Configuration conf, int nRegionNodes, public MiniHBaseCluster(Configuration conf, int nRegionNodes,
final boolean miniHdfsFilesystem, boolean format, boolean deleteOnExit) final boolean miniHdfsFilesystem, boolean format, boolean deleteOnExit)
throws IOException { throws IOException {
this.conf = conf; this.conf = conf;
this.deleteOnExit = deleteOnExit; this.deleteOnExit = deleteOnExit;
this.shutdownDFS = false;
if (miniHdfsFilesystem) { if (miniHdfsFilesystem) {
this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null); this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
this.fs = cluster.getFileSystem(); this.fs = cluster.getFileSystem();
this.shutdownDFS = true;
} else { } else {
this.cluster = null; this.cluster = null;
this.fs = FileSystem.get(conf); this.fs = FileSystem.get(conf);
@ -139,7 +149,7 @@ public class MiniHBaseCluster implements HConstants {
throw e; throw e;
} }
} }
/** runs the master server */ /** runs the master server */
public static class MasterThread extends Thread { public static class MasterThread extends Thread {
private final HMaster master; private final HMaster master;
@ -147,20 +157,20 @@ public class MiniHBaseCluster implements HConstants {
super(m, "Master:" + m.getMasterAddress().toString()); super(m, "Master:" + m.getMasterAddress().toString());
this.master = m; this.master = m;
} }
/** {@inheritDoc} */ /** {@inheritDoc} */
@Override @Override
public void run() { public void run() {
LOG.info("Starting " + getName()); LOG.info("Starting " + getName());
super.run(); super.run();
} }
/** @return master server */ /** @return master server */
public HMaster getMaster() { public HMaster getMaster() {
return this.master; return this.master;
} }
} }
/** runs region servers */ /** runs region servers */
public static class RegionServerThread extends Thread { public static class RegionServerThread extends Thread {
private final HRegionServer regionServer; private final HRegionServer regionServer;
@ -168,20 +178,20 @@ public class MiniHBaseCluster implements HConstants {
super(r, "RegionServer:" + index); super(r, "RegionServer:" + index);
this.regionServer = r; this.regionServer = r;
} }
/** {@inheritDoc} */ /** {@inheritDoc} */
@Override @Override
public void run() { public void run() {
LOG.info("Starting " + getName()); LOG.info("Starting " + getName());
super.run(); super.run();
} }
/** @return the region server */ /** @return the region server */
public HRegionServer getRegionServer() { public HRegionServer getRegionServer() {
return this.regionServer; return this.regionServer;
} }
} }
/** /**
* Use this method to start a master. * Use this method to start a master.
* If you want to start an hbase cluster * If you want to start an hbase cluster
@ -197,7 +207,7 @@ public class MiniHBaseCluster implements HConstants {
*/ */
public static MasterThread startMaster(final Configuration c) public static MasterThread startMaster(final Configuration c)
throws IOException { throws IOException {
if(c.get(MASTER_ADDRESS) == null) { if(c.get(MASTER_ADDRESS) == null) {
c.set(MASTER_ADDRESS, "localhost:0"); c.set(MASTER_ADDRESS, "localhost:0");
} }
@ -221,7 +231,7 @@ public class MiniHBaseCluster implements HConstants {
*/ */
public static ArrayList<RegionServerThread> startRegionServers( public static ArrayList<RegionServerThread> startRegionServers(
final Configuration c, final int count) throws IOException { final Configuration c, final int count) throws IOException {
// Start the HRegionServers. Always have regionservers come up on // Start the HRegionServers. Always have regionservers come up on
// port '0' so there won't be clashes over default port as unit tests // port '0' so there won't be clashes over default port as unit tests
// start/stop ports at different times during the life of the test. // start/stop ports at different times during the life of the test.
@ -234,10 +244,10 @@ public class MiniHBaseCluster implements HConstants {
} }
return threads; return threads;
} }
/** /**
* Starts a region server thread running * Starts a region server thread running
* *
* @throws IOException * @throws IOException
* @return Name of regionserver started. * @return Name of regionserver started.
*/ */
@ -247,10 +257,10 @@ public class MiniHBaseCluster implements HConstants {
this.regionThreads.add(t); this.regionThreads.add(t);
return t.getName(); return t.getName();
} }
private static RegionServerThread startRegionServer(final Configuration c, private static RegionServerThread startRegionServer(final Configuration c,
final int index) final int index)
throws IOException { throws IOException {
final HRegionServer hrs = new HRegionServer(c); final HRegionServer hrs = new HRegionServer(c);
RegionServerThread t = new RegionServerThread(hrs, index); RegionServerThread t = new RegionServerThread(hrs, index);
t.setName("regionserver" + t.setName("regionserver" +
@ -261,14 +271,14 @@ public class MiniHBaseCluster implements HConstants {
/** /**
* Get the cluster on which this HBase cluster is running * Get the cluster on which this HBase cluster is running
* *
* @return MiniDFSCluster * @return MiniDFSCluster
*/ */
public MiniDFSCluster getDFSCluster() { public MiniDFSCluster getDFSCluster() {
return cluster; return cluster;
} }
/** /**
* @return Returns the rpc address actually used by the master server, because * @return Returns the rpc address actually used by the master server, because
* the supplied port is not necessarily the actual port used. * the supplied port is not necessarily the actual port used.
*/ */
@ -278,7 +288,7 @@ public class MiniHBaseCluster implements HConstants {
/** /**
* Cause a region server to exit without cleaning up * Cause a region server to exit without cleaning up
* *
* @param serverNumber * @param serverNumber
*/ */
public void abortRegionServer(int serverNumber) { public void abortRegionServer(int serverNumber) {
@ -290,7 +300,7 @@ public class MiniHBaseCluster implements HConstants {
/** /**
* Shut down the specified region server cleanly * Shut down the specified region server cleanly
* *
* @param serverNumber * @param serverNumber
* @return the region server that was stopped * @return the region server that was stopped
*/ */
@ -320,7 +330,7 @@ public class MiniHBaseCluster implements HConstants {
} }
return regionServerThread.getName(); return regionServerThread.getName();
} }
/** /**
* Wait for Mini HBase Cluster to shut down. * Wait for Mini HBase Cluster to shut down.
*/ */
@ -346,7 +356,7 @@ public class MiniHBaseCluster implements HConstants {
} }
} }
} }
/** /**
* Shut down HBase cluster started by calling * Shut down HBase cluster started by calling
* {@link #startMaster(Configuration)} and then * {@link #startMaster(Configuration)} and then
@ -389,14 +399,17 @@ public class MiniHBaseCluster implements HConstants {
((masterThread != null)? masterThread.getName(): "0 masters") + " " + ((masterThread != null)? masterThread.getName(): "0 masters") + " " +
regionServerThreads.size() + " region server(s)"); regionServerThreads.size() + " region server(s)");
} }
/**
* Shut down the mini HBase cluster
*/
public void shutdown() { public void shutdown() {
MiniHBaseCluster.shutdown(this.masterThread, this.regionThreads); MiniHBaseCluster.shutdown(this.masterThread, this.regionThreads);
try { try {
if (cluster != null) { if (shutdownDFS && cluster != null) {
FileSystem fs = cluster.getFileSystem(); FileSystem fs = cluster.getFileSystem();
LOG.info("Shutting down Mini DFS cluster"); LOG.info("Shutting down Mini DFS cluster");
cluster.shutdown(); cluster.shutdown();
@ -405,10 +418,10 @@ public class MiniHBaseCluster implements HConstants {
fs.close(); fs.close();
} }
} }
} catch (IOException e) { } catch (IOException e) {
LOG.error("shutdown", e); LOG.error("shutdown", e);
} finally { } finally {
// Delete all DFS files // Delete all DFS files
if(deleteOnExit) { if(deleteOnExit) {
@ -428,7 +441,7 @@ public class MiniHBaseCluster implements HConstants {
} }
f.delete(); f.delete();
} }
/** /**
* Call flushCache on all regions on all participating regionservers. * Call flushCache on all regions on all participating regionservers.
* @throws IOException * @throws IOException

View File

@ -30,32 +30,35 @@ import org.apache.log4j.Logger;
*/ */
public class TestDFSAbort extends HBaseClusterTestCase { public class TestDFSAbort extends HBaseClusterTestCase {
/** constructor */
public TestDFSAbort() {
super();
Logger.getRootLogger().setLevel(Level.WARN);
Logger.getLogger(this.getClass().getPackage().getName()).setLevel(Level.DEBUG);
}
/** {@inheritDoc} */ /** {@inheritDoc} */
@Override @Override
public void setUp() throws Exception { public void setUp() throws Exception {
super.setUp(); try {
HTableDescriptor desc = new HTableDescriptor(getName()); super.setUp();
desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY_STR)); HTableDescriptor desc = new HTableDescriptor(getName());
HBaseAdmin admin = new HBaseAdmin(conf); desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY_STR));
admin.createTable(desc); HBaseAdmin admin = new HBaseAdmin(conf);
admin.createTable(desc);
} catch (Exception e) {
e.printStackTrace();
throw e;
}
} }
/** /**
* @throws Exception * @throws Exception
*/ */
public void testDFSAbort() throws Exception { public void testDFSAbort() throws Exception {
// By now the Mini DFS is running, Mini HBase is running and we have try {
// created a table. Now let's yank the rug out from HBase // By now the Mini DFS is running, Mini HBase is running and we have
cluster.getDFSCluster().shutdown(); // created a table. Now let's yank the rug out from HBase
// Now wait for Mini HBase Cluster to shut down cluster.getDFSCluster().shutdown();
cluster.join(); // Now wait for Mini HBase Cluster to shut down
cluster.join();
} catch (Exception e) {
e.printStackTrace();
throw e;
}
} }
/** /**