HADOOP-1560 NPE in MiniHBaseCluster on Windows

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@553080 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2007-07-04 04:48:18 +00:00
parent 655728f3bf
commit 9eb369c266
6 changed files with 57 additions and 39 deletions
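
The patch follows one defensive pattern throughout: fields that are created lazily during cluster startup are initialized to null, and every shutdown path checks for null before touching them, so a partially constructed MiniHBaseCluster (for example when DFS startup fails on Windows) can still be torn down without a NullPointerException. Below is a minimal standalone sketch of that pattern; the names MiniClusterSketch and ServiceHandle are hypothetical and not taken from the patch.

// Hedged sketch (not part of the patch): the null-guarded shutdown pattern the fix applies.
public class MiniClusterSketch {

  // Hypothetical stand-in for a component such as the master or a region server.
  static class ServiceHandle {
    void shutdown() { System.out.println("service stopped"); }
  }

  // Created lazily during startup; stays null if startup fails part-way.
  private ServiceHandle master = null;
  private Thread masterThread = null;

  public void shutdown() {
    // Guard every lazily created member so a failed startup still shuts down cleanly.
    if (master != null) {
      master.shutdown();
    }
    if (masterThread != null) {
      try {
        masterThread.join();
      } catch (InterruptedException e) {
        // continue shutting down
      }
    }
  }

  public static void main(String[] args) {
    // No NPE even though nothing was ever started.
    new MiniClusterSketch().shutdown();
  }
}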

@@ -45,3 +45,4 @@ Trunk (unreleased changes)
 26. HADOOP-1543 [hbase] Add HClient.tableExists
 27. HADOOP-1519 [hbase] map/reduce interface for HBase
 28. HADOOP-1523 Hung region server waiting on write locks
+29. HADOOP-1560 NPE in MiniHBaseCluster on Windows

@@ -32,7 +32,7 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
   protected HTableDescriptor desc;
   protected ImmutableBytesWritable value;
-  protected MiniDFSCluster dfsCluster;
+  protected MiniDFSCluster dfsCluster = null;
   protected FileSystem fs;
   protected Path dir;
@@ -104,6 +104,9 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
     } catch(Throwable t) {
       t.printStackTrace();
+      if(dfsCluster != null) {
+        dfsCluster.shutdown();
+      }
       fail();
     }
   }

@@ -37,8 +37,8 @@ public class MiniHBaseCluster implements HConstants {
   private MiniDFSCluster cluster;
   private FileSystem fs;
   private Path parentdir;
-  private HMaster master;
-  private Thread masterThread;
+  private HMaster master = null;
+  private Thread masterThread = null;
   List<HRegionServer> regionServers;
   List<Thread> regionThreads;
   private boolean deleteOnExit = true;
@@ -83,6 +83,8 @@ public class MiniHBaseCluster implements HConstants {
     this.conf = conf;
     this.cluster = dfsCluster;
+    this.regionServers = new ArrayList<HRegionServer>(nRegionNodes);
+    this.regionThreads = new ArrayList<Thread>(nRegionNodes);
     init(nRegionNodes);
   }
@@ -102,6 +104,8 @@ public class MiniHBaseCluster implements HConstants {
       throws IOException {
     this.conf = conf;
     this.deleteOnExit = deleteOnExit;
+    this.regionServers = new ArrayList<HRegionServer>(nRegionNodes);
+    this.regionThreads = new ArrayList<Thread>(nRegionNodes);
     if (miniHdfsFilesystem) {
       try {
@@ -167,8 +171,6 @@ public class MiniHBaseCluster implements HConstants {
   private void startRegionServers(final int nRegionNodes)
       throws IOException {
-    this.regionServers = new ArrayList<HRegionServer>(nRegionNodes);
-    this.regionThreads = new ArrayList<Thread>(nRegionNodes);
     for(int i = 0; i < nRegionNodes; i++) {
       startRegionServer();
     }
@@ -239,7 +241,9 @@ public class MiniHBaseCluster implements HConstants {
     for(HRegionServer hsr: this.regionServers) {
       hsr.stop();
     }
-    master.shutdown();
+    if(master != null) {
+      master.shutdown();
+    }
     for(Thread t: this.regionThreads) {
       if (t.isAlive()) {
         try {
@@ -249,11 +253,13 @@ public class MiniHBaseCluster implements HConstants {
         }
       }
     }
-    try {
-      masterThread.join();
-    } catch(InterruptedException e) {
-      // continue
+    if (masterThread != null) {
+      try {
+        masterThread.join();
+      } catch(InterruptedException e) {
+        // continue
+      }
     }
     LOG.info("HBase Cluster shutdown complete");

@@ -61,6 +61,9 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListe
       cleanup();
     } catch(Exception e) {
+      if(cluster != null) {
+        cluster.shutdown();
+      }
       e.printStackTrace();
       fail();
     }
@@ -798,6 +801,7 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListe
     // Shut down the mini cluster
     cluster.shutdown();
+    cluster = null;
     // Delete all the DFS files

@@ -260,9 +260,6 @@ public class TestScanner extends HBaseTestCase {
       throw e;
     } finally {
-      if(fs != null) {
-        fs.close();
-      }
       if(cluster != null) {
         cluster.shutdown();
       }

@@ -75,39 +75,46 @@ public class TestTableMapReduce extends HBaseTestCase {
     desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));
     dfsCluster = new MiniDFSCluster(conf, 1, true, (String[])null);
-    fs = dfsCluster.getFileSystem();
-    dir = new Path("/hbase");
-    fs.mkdirs(dir);
-    // create the root and meta regions and insert the data region into the meta
-    HRegion root = createNewHRegion(fs, dir, conf, HGlobals.rootTableDesc, 0L, null, null);
-    HRegion meta = createNewHRegion(fs, dir, conf, HGlobals.metaTableDesc, 1L, null, null);
-    HRegion.addRegionToMETA(root, meta);
-    HRegion region = createNewHRegion(fs, dir, conf, desc, rand.nextLong(), null, null);
-    HRegion.addRegionToMETA(meta, region);
-    // insert some data into the test table
-    for(int i = 0; i < values.length; i++) {
-      long lockid = region.startUpdate(new Text("row_"
-          + String.format("%1$05d", i)));
-      region.put(lockid, TEXT_INPUT_COLUMN, values[i]);
-      region.commit(lockid);
-    }
-    region.close();
-    region.getLog().closeAndDelete();
-    meta.close();
-    meta.getLog().closeAndDelete();
-    root.close();
-    root.getLog().closeAndDelete();
-    // Start up HBase cluster
-    hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
+    try {
+      fs = dfsCluster.getFileSystem();
+      dir = new Path("/hbase");
+      fs.mkdirs(dir);
+      // create the root and meta regions and insert the data region into the meta
+      HRegion root = createNewHRegion(fs, dir, conf, HGlobals.rootTableDesc, 0L, null, null);
+      HRegion meta = createNewHRegion(fs, dir, conf, HGlobals.metaTableDesc, 1L, null, null);
+      HRegion.addRegionToMETA(root, meta);
+      HRegion region = createNewHRegion(fs, dir, conf, desc, rand.nextLong(), null, null);
+      HRegion.addRegionToMETA(meta, region);
+      // insert some data into the test table
+      for(int i = 0; i < values.length; i++) {
+        long lockid = region.startUpdate(new Text("row_"
+            + String.format("%1$05d", i)));
+        region.put(lockid, TEXT_INPUT_COLUMN, values[i]);
+        region.commit(lockid);
+      }
+      region.close();
+      region.getLog().closeAndDelete();
+      meta.close();
+      meta.getLog().closeAndDelete();
+      root.close();
+      root.getLog().closeAndDelete();
+      // Start up HBase cluster
+      hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
+    } catch (Exception e) {
+      if (dfsCluster != null) {
+        dfsCluster.shutdown();
+      }
+    }
   }
   @Override