HADOOP-1847 Many HBase tests do not fail well.

HADOOP-1793 (Phase 1) Remove TestHClient

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@573492 13f79535-47bb-0310-9956-ffa450edef68
Author: Jim Kellerman, 2007-09-07 07:28:42 +00:00
Commit: 6626ee072a (parent 84ef0f95ef)
18 changed files with 354 additions and 450 deletions
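The recurring change across these files is the removal of try/catch blocks that swallow exceptions and report only a bare fail(): the tests now declare the checked exception in their signatures and let it propagate, so a failure surfaces with its real stack trace instead of an opaque AssertionFailedError. A minimal sketch of the before/after shape, in the JUnit 3 style these tests use (the class and method names below are illustrative, not part of this commit):

    import java.io.IOException;
    import junit.framework.TestCase;

    /** Illustrative only: the "fail well" pattern this commit applies. */
    public class FailWellExample extends TestCase {

      // Before: the root cause is discarded; JUnit reports only an AssertionFailedError.
      public void testOldStyle() {
        try {
          doSomethingThatThrows();
        } catch (IOException e) {
          e.printStackTrace();
          fail();
        }
      }

      // After: declare the exception and let it propagate, so the real stack trace fails the test.
      public void testNewStyle() throws IOException {
        doSomethingThatThrows();
      }

      // Stand-in for the HBase calls in these tests, e.g. table.commit(lockid) or admin.createTable(desc).
      private void doSomethingThatThrows() throws IOException {
      }
    }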

CHANGES.txt

@@ -31,6 +31,7 @@ Trunk (unreleased changes)
     HADOOP-1821 Replace all String.getBytes() with String.getBytes("UTF-8")
     HADOOP-1832 listTables() returns duplicate tables
     HADOOP-1834 Scanners ignore timestamp passed on creation
+    HADOOP-1847 Many HBase tests do not fail well.
 
   IMPROVEMENTS
     HADOOP-1737 Make HColumnDescriptor data publically members settable
@@ -39,6 +40,7 @@ Trunk (unreleased changes)
                 filter types
     HADOOP-1760 Use new MapWritable and SortedMapWritable classes from
                 org.apache.hadoop.io
+    HADOOP-1793 (Phase 1) Remove TestHClient
     HADOOP-1794 Remove deprecated APIs
     HADOOP-1802 Startup scripts should wait until hdfs as cleared 'safe mode'
     HADOOP-1835 Updated Documentation for HBase setup/installation

AbstractMergeTestBase.java

@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.util.Random;
 
 import org.apache.hadoop.dfs.MiniDFSCluster;
@@ -32,7 +31,7 @@ import org.apache.hadoop.io.Text;
 /** Abstract base class for merge tests */
 public abstract class AbstractMergeTestBase extends HBaseTestCase {
   protected static final Text COLUMN_NAME = new Text("contents:");
-  protected Random rand;
+  protected final Random rand = new Random();
   protected HTableDescriptor desc;
   protected ImmutableBytesWritable value;
@@ -46,7 +45,6 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    rand = new Random();
     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
@@ -57,23 +55,11 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
     while(val.length() < 1024) {
       val.append(partialValue);
     }
-    try {
-      value = new ImmutableBytesWritable(val.toString().getBytes(HConstants.UTF8_ENCODING));
-    } catch(UnsupportedEncodingException e) {
-      fail();
-    }
+    value = new ImmutableBytesWritable(
+        val.toString().getBytes(HConstants.UTF8_ENCODING));
 
-    try {
-      dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-      fs = dfsCluster.getFileSystem();
-      dir = new Path("/hbase");
-      fs.mkdirs(dir);
-    } catch(Throwable t) {
-      t.printStackTrace();
-      fail();
-    }
+    dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
 
     // We create three data regions: The first is too large to merge since it
     // will be > 64 MB in size. The second two will be smaller and will be
@@ -83,6 +69,10 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
     // least 65536 rows. We will make certain by writing 70000
 
     try {
+      fs = dfsCluster.getFileSystem();
+      dir = new Path("/hbase");
+      fs.mkdirs(dir);
+
       Text row_70001 = new Text("row_70001");
       Text row_80001 = new Text("row_80001");
@@ -95,8 +85,11 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
       // Now create the root and meta regions and insert the data regions
       // created above into the meta
 
-      HRegion root = createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
-      HRegion meta = createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
+      HRegion root =
+        createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
+      HRegion meta =
+        createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
 
       HRegion.addRegionToMETA(root, meta);
@@ -109,12 +102,11 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
       meta.close();
       meta.getLog().closeAndDelete();
 
-    } catch(Throwable t) {
-      t.printStackTrace();
+    } catch (Exception e) {
       if(dfsCluster != null) {
         dfsCluster.shutdown();
       }
-      fail();
+      throw e;
     }
   }
@@ -124,13 +116,16 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
   @Override
   public void tearDown() throws Exception {
     super.tearDown();
+    if (dfsCluster != null) {
       dfsCluster.shutdown();
+    }
   }
 
-  private HRegion createAregion(Text startKey, Text endKey, int firstRow, int nrows)
-  throws IOException {
-    HRegion region = createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);
+  private HRegion createAregion(Text startKey, Text endKey, int firstRow,
+      int nrows) throws IOException {
+    HRegion region =
+      createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);
     System.out.println("created region " + region.getRegionName());

HBaseTestCase.java

@@ -70,14 +70,10 @@ public abstract class HBaseTestCase extends TestCase {
   @Override
   protected void tearDown() throws Exception {
-    try {
-      if (this.localFs != null && this.testDir != null &&
-          this.localFs.exists(testDir)) {
-        this.localFs.delete(testDir);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
+    if (this.localFs != null && this.testDir != null &&
+        this.localFs.exists(testDir)) {
+      this.localFs.delete(testDir);
+    }
     super.tearDown();
   }

MiniHBaseCluster.java

@@ -62,6 +62,7 @@ public class MiniHBaseCluster implements HConstants {
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes)
   throws IOException {
+
     this(conf, nRegionNodes, true, true, true);
   }
@@ -76,6 +77,7 @@ public class MiniHBaseCluster implements HConstants {
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
       final boolean miniHdfsFilesystem) throws IOException {
+
     this(conf, nRegionNodes, miniHdfsFilesystem, true, true);
   }
@@ -88,8 +90,7 @@
    * @throws IOException
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
-      MiniDFSCluster dfsCluster)
-  throws IOException {
+      MiniDFSCluster dfsCluster) throws IOException {
 
     this.conf = conf;
     this.cluster = dfsCluster;
@@ -110,33 +111,23 @@
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
       final boolean miniHdfsFilesystem, boolean format, boolean deleteOnExit)
   throws IOException {
     this.conf = conf;
     this.deleteOnExit = deleteOnExit;
     if (miniHdfsFilesystem) {
-      try {
       this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
-      } catch(Throwable t) {
-        LOG.error("Failed setup of mini dfs cluster", t);
-        t.printStackTrace();
-        return;
-      }
     }
     init(nRegionNodes);
   }
 
-  private void init(final int nRegionNodes)
-  throws IOException {
-    try {
+  private void init(final int nRegionNodes) throws IOException {
     try {
       this.fs = FileSystem.get(conf);
       this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
       fs.mkdirs(parentdir);
-      } catch(IOException e) {
-        LOG.error("Failed setup of FileSystem", e);
-        throw e;
-      }
       this.masterThread = startMaster(this.conf);
       this.regionThreads = startRegionServers(this.conf, nRegionNodes);
     } catch(IOException e) {
       shutdown();
       throw e;
@@ -200,6 +191,7 @@
    */
   public static MasterThread startMaster(final Configuration c)
   throws IOException {
+
     if(c.get(MASTER_ADDRESS) == null) {
       c.set(MASTER_ADDRESS, "localhost:0");
     }
@@ -222,8 +214,8 @@
    * @see #startMaster(Configuration)
    */
   public static ArrayList<RegionServerThread> startRegionServers(
-      final Configuration c, final int count)
-  throws IOException {
+      final Configuration c, final int count) throws IOException {
     // Start the HRegionServers.  Always have regionservers come up on
     // port '0' so there won't be clashes over default port as unit tests
     // start/stop ports at different times during the life of the test.
@@ -249,8 +241,8 @@
   }
 
   private static RegionServerThread startRegionServer(final Configuration c,
-      final int index)
-  throws IOException {
+      final int index) throws IOException {
     final HRegionServer hsr = new HRegionServer(c);
     RegionServerThread t = new RegionServerThread(hsr, index);
     t.start();
@@ -362,20 +354,25 @@
   }
 
   void shutdown() {
-    shutdown(this.masterThread, this.regionThreads);
-    try {
-      if (this.cluster != null && this.cluster.getFileSystem() != null) {
-        this.cluster.getFileSystem().close();
-      }
-    } catch (IOException e) {
-      LOG.error("Closing down dfs", e);
-    }
-    if(cluster != null) {
-      LOG.info("Shutting down Mini DFS cluster");
-      cluster.shutdown();
-    }
-    // Delete all DFS files
-    if(deleteOnExit) {
-      deleteFile(new File(System.getProperty(
+    MiniHBaseCluster.shutdown(this.masterThread, this.regionThreads);
+    // Close the file system.  Will complain if files open so helps w/ leaks.
+    try {
+      if (cluster != null) {
+        FileSystem fs = cluster.getFileSystem();
+        LOG.info("Shutting down Mini DFS cluster");
+        cluster.shutdown();
+        if (fs != null) {
+          LOG.info("Shutting down FileSystem");
+          fs.close();
+        }
+      }
+    } catch (IOException e) {
+      LOG.error("shutdown", e);
+    } finally {
+      // Delete all DFS files
+      if(deleteOnExit) {
+        deleteFile(new File(System.getProperty(
@@ -383,6 +380,8 @@
       }
     }
+    }
+
   }
 
   private void deleteFile(File f) {
     if(f.isDirectory()) {
       File[] children = f.listFiles();

MultiRegionTable.java

@@ -34,11 +34,12 @@ public class MultiRegionTable extends HBaseTestCase {
    */
   public static void makeMultiRegionTable(Configuration conf,
       MiniHBaseCluster cluster, FileSystem localFs, String tableName,
-      String columnName)
-  throws IOException {
+      String columnName) throws IOException {
     // This size should make it so we always split using the addContent
     // below.  After adding all data, the first region is 1.3M. Should
     // set max filesize to be <= 1M.
     assertTrue(conf.getLong("hbase.hregion.max.filesize",
         HConstants.DEFAULT_MAX_FILE_SIZE) <= 1024 * 1024);
@@ -46,24 +47,33 @@
     Path d = cluster.regionThreads.get(0).getRegionServer().rootDir;
     FileSystem fs = (cluster.getDFSCluster() == null) ?
       localFs : cluster.getDFSCluster().getFileSystem();
-    assertTrue(fs != null);
+    assertNotNull(fs);
     // Get connection on the meta table and get count of rows.
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
     int count = count(meta, HConstants.COLUMN_FAMILY_STR);
     HTable t = new HTable(conf, new Text(tableName));
     addContent(new HTableLoader(t), columnName);
     // All is running in the one JVM so I should be able to get the single
     // region instance and bring on a split.
     HRegionInfo hri =
       t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
     HRegion r = cluster.regionThreads.get(0).getRegionServer().
       onlineRegions.get(hri.getRegionName());
     // Flush will provoke a split next time the split-checker thread runs.
     r.flushcache(false);
     // Now, wait until split makes it into the meta table.
-    for (int i = 0; i < retries &&
-        (count(meta, HConstants.COLUMN_FAMILY_STR) <= count); i++) {
+    for (int i = 0;
+        i < retries && (count(meta, HConstants.COLUMN_FAMILY_STR) <= count);
+        i++) {
       try {
         Thread.sleep(5000);
       } catch (InterruptedException e) {
@@ -75,9 +85,11 @@
     if (count <= oldCount) {
       throw new IOException("Failed waiting on splits to show up");
     }
+
     // Get info on the parent from the meta table.  Pass in 'hri'.  Its the
     // region we have been dealing with up to this.  Its the parent of the
     // region split.
     Map<Text, byte []> data = getSplitParentInfo(meta, hri);
     HRegionInfo parent =
       Writables.getHRegionInfoOrNull(data.get(HConstants.COL_REGIONINFO));
@@ -92,13 +104,19 @@
     LOG.info("Split happened. Parent is " + parent.getRegionName() +
       " and daughters are " + splitA.getRegionName() + ", " +
       splitB.getRegionName());
+
     // Recalibrate will cause us to wait on new regions' deployment
     recalibrate(t, new Text(columnName), retries);
+
     // Compact a region at a time so we can test case where one region has
     // no references but the other still has some
     compact(cluster, splitA);
+
     // Wait till the parent only has reference to remaining split, one that
     // still has references.
     while (getSplitParentInfo(meta, parent).size() == 3) {
       try {
         Thread.sleep(5000);
@@ -108,21 +126,28 @@
     }
     LOG.info("Parent split returned " +
         getSplitParentInfo(meta, parent).keySet().toString());
     // Call second split.
     compact(cluster, splitB);
     // Now wait until parent disappears.
-    LOG.info("Waiting on parent " + parent.getRegionName() +
-        " to disappear");
-    for (int i = 0; i < retries &&
-        getSplitParentInfo(meta, parent) != null; i++) {
+    LOG.info("Waiting on parent " + parent.getRegionName() + " to disappear");
+    for (int i = 0;
+        i < retries && getSplitParentInfo(meta, parent) != null;
+        i++) {
       try {
         Thread.sleep(5000);
       } catch (InterruptedException e) {
         // continue
       }
     }
-    assertTrue(getSplitParentInfo(meta, parent) == null);
+    assertNull(getSplitParentInfo(meta, parent));
     // Assert cleaned up.
     for (int i = 0; i < retries && fs.exists(parentDir); i++) {
       try {
         Thread.sleep(5000);
@@ -142,6 +167,7 @@
    */
   private static int count(final HTable t, final String column)
   throws IOException {
+
     int size = 0;
     Text [] cols = new Text[] {new Text(column)};
     HScannerInterface s = t.obtainScanner(cols, HConstants.EMPTY_START_ROW,
@@ -162,8 +188,8 @@
    * @return Return row info for passed in region or null if not found in scan.
    */
   private static Map<Text, byte []> getSplitParentInfo(final HTable t,
-      final HRegionInfo parent)
-  throws IOException {
+      final HRegionInfo parent) throws IOException {
     HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
       HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
     try {
@@ -199,6 +225,7 @@
    */
   private static void recalibrate(final HTable t, final Text column,
       final int retries) throws IOException {
+
     for (int i = 0; i < retries; i++) {
       try {
         HScannerInterface s =
@@ -229,14 +256,15 @@
    * @throws IOException
    */
   private static void compact(final MiniHBaseCluster cluster,
-      final HRegionInfo r)
-  throws IOException {
+      final HRegionInfo r) throws IOException {
     LOG.info("Starting compaction");
     for (MiniHBaseCluster.RegionServerThread thread: cluster.regionThreads) {
-      SortedMap<Text, HRegion> regions =
-        thread.getRegionServer().onlineRegions;
+      SortedMap<Text, HRegion> regions = thread.getRegionServer().onlineRegions;
       // Retry if ConcurrentModification... alternative of sync'ing is not
       // worth it for sake of unit test.
       for (int i = 0; i < 10; i++) {
         try {
           for (HRegion online: regions.values()) {

TestBatchUpdate.java

@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.Map;
 import java.util.TreeMap;
@@ -35,8 +36,9 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
   private HTableDescriptor desc = null;
   private HTable table = null;
 
-  /** constructor
-   * @throws UnsupportedEncodingException */
+  /**
+   * @throws UnsupportedEncodingException
+   */
   public TestBatchUpdate() throws UnsupportedEncodingException {
     value = "abcd".getBytes(HConstants.UTF8_ENCODING);
   }
@@ -49,19 +51,15 @@
     super.setUp();
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
-    try {
     HBaseAdmin admin = new HBaseAdmin(conf);
     admin.createTable(desc);
     table = new HTable(conf, desc.getName());
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
   }
 
-  /** the test case */
-  public void testBatchUpdate() {
+  /**
+   * @throws IOException
+   */
+  public void testBatchUpdate() throws IOException {
     try {
       table.commit(-1L);
@@ -74,7 +72,6 @@
     long lockid = table.startUpdate(new Text("row1"));
 
-    try {
     try {
       @SuppressWarnings("unused")
       long dummy = table.startUpdate(new Text("row2"));
@@ -102,9 +99,5 @@
             new String(e.getValue(), HConstants.UTF8_ENCODING));
       }
     }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
   }
 }

TestBloomFilters.java

@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
@@ -146,11 +147,15 @@
     conf.set("hbase.regionserver.maxlogentries", "90"); // and roll log too
   }
 
-  /** Test that specifies explicit parameters for the bloom filter */
-  public void testExplicitParameters() {
+  /**
+   * Test that specifies explicit parameters for the bloom filter
+   * @throws IOException
+   */
+  public void testExplicitParameters() throws IOException {
     HTable table = null;
-    try {
+
     // Setup
     HTableDescriptor desc = new HTableDescriptor(getName());
     BloomFilterDescriptor bloomFilter =
       new BloomFilterDescriptor(              // if we insert 1000 values
@@ -187,10 +192,6 @@
       table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
       table.commit(lockid);
     }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
     try {
       // Give cache flusher and log roller a chance to run
       // Otherwise we'll never hit the bloom filter, just the memcache
@@ -201,8 +202,6 @@
     }
 
-    try {
-      if (table != null) {
     for(int i = 0; i < testKeys.length; i++) {
       byte[] value = table.get(testKeys[i], CONTENTS);
       if(value != null && value.length != 0) {
@@ -211,17 +210,16 @@
       }
     }
   }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
-  }
 
-  /** Test that uses computed for the bloom filter */
-  public void testComputedParameters() {
+  /**
+   * Test that uses computed for the bloom filter
+   * @throws IOException
+   */
+  public void testComputedParameters() throws IOException {
     HTable table = null;
-    try {
+
     // Setup
     HTableDescriptor desc = new HTableDescriptor(getName());
     BloomFilterDescriptor bloomFilter =
@@ -259,10 +257,6 @@
       table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
       table.commit(lockid);
     }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
     try {
       // Give cache flusher and log roller a chance to run
       // Otherwise we'll never hit the bloom filter, just the memcache
@@ -272,8 +266,6 @@
       // ignore
     }
 
-    try {
-      if (table != null) {
     for(int i = 0; i < testKeys.length; i++) {
       byte[] value = table.get(testKeys[i], CONTENTS);
       if(value != null && value.length != 0) {
@@ -282,9 +274,4 @@
       }
     }
   }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
-  }
 }

TestCompaction.java

@@ -30,13 +30,15 @@ import org.apache.commons.logging.LogFactory;
 public class TestCompaction extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
 
+  /** {@inheritDoc} */
   @Override
-  protected void setUp() throws Exception {
+  public void setUp() throws Exception {
     super.setUp();
   }
 
+  /** {@inheritDoc} */
   @Override
-  protected void tearDown() throws Exception {
+  public void tearDown() throws Exception {
     super.tearDown();
   }

TestGet.java

@@ -172,10 +172,6 @@
       r.close();
       log.closeAndDelete();
 
-    } catch(IOException e) {
-      e.printStackTrace();
-      throw e;
     } finally {
       if(cluster != null) {
         cluster.shutdown();

TestHClient.java (deleted)

@@ -1,55 +0,0 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Test HClient.
*/
@Deprecated
public class TestHClient extends HBaseClusterTestCase {
private Log LOG = LogFactory.getLog(this.getClass().getName());
private HClient client;
/** {@inheritDoc} */
@Override
public void setUp() throws Exception {
super.setUp();
this.client = new HClient(this.conf);
}
/** the test
* @throws Exception
*/
public void testCommandline() throws Exception {
final String m = "--master=" + this.conf.get(HConstants.MASTER_ADDRESS);
LOG.info("Creating table");
// Assert each of below returns 0: i.e. success.
assertEquals("create table", 0,
this.client.doCommandLine(
new String [] {m, "createTable", getName(), "family:", "1"}));
assertEquals("list tables", 0,
this.client.doCommandLine(new String [] {m, "listTables"}));
assertEquals("delete table", 0,
this.client.doCommandLine(new String [] {m, "deleteTable", getName()}));
}
}

TestHLog.java

@@ -37,9 +37,10 @@ public class TestHLog extends HBaseTestCase implements HConstants {
     super.setUp();
   }
 
-  /** The test */
-  public void testAppend() {
-    try {
+  /**
+   * @throws IOException
+   */
+  public void testAppend() throws IOException {
     Path dir = getUnitTestdir(getName());
     FileSystem fs = FileSystem.get(this.conf);
     if (fs.exists(dir)) {
@@ -98,10 +99,6 @@
         fs.delete(dir);
       }
     }
-    } catch(IOException e) {
-      e.printStackTrace();
-      fail();
-    }
   }
 
   /** {@inheritDoc} */

TestHMemcache.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase;
 
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
@@ -46,11 +45,9 @@ public class TestHMemcache extends TestCase {
   private static final String COLUMN_FAMILY = "column";
 
-  /* (non-Javadoc)
-   * @see junit.framework.TestCase#setUp()
-   */
+  /** {@inheritDoc} */
   @Override
-  protected void setUp() throws Exception {
+  public void setUp() throws Exception {
     super.setUp();
     this.hmemcache = new HMemcache();
     // Set up a configuration that has configuration for a file
@@ -58,11 +55,9 @@
     this.conf = new HBaseConfiguration();
   }
 
-  /* (non-Javadoc)
-   * @see junit.framework.TestCase#tearDown()
-   */
+  /** {@inheritDoc} */
   @Override
-  protected void tearDown() throws Exception {
+  public void tearDown() throws Exception {
     super.tearDown();
   }
@@ -70,10 +65,8 @@
     return new Text("row" + Integer.toString(index));
   }
 
-  private Text getColumnName(final int rowIndex,
-      final int colIndex) {
-    return new Text(COLUMN_FAMILY + ":" +
-      Integer.toString(rowIndex) + ";" +
+  private Text getColumnName(final int rowIndex, final int colIndex) {
+    return new Text(COLUMN_FAMILY + ":" + Integer.toString(rowIndex) + ";" +
       Integer.toString(colIndex));
   }
@@ -81,16 +74,12 @@
    * Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT}
    * @param hmc Instance to add rows to.
    */
-  private void addRows(final HMemcache hmc) {
+  private void addRows(final HMemcache hmc) throws UnsupportedEncodingException {
     for (int i = 0; i < ROW_COUNT; i++) {
       TreeMap<Text, byte []> columns = new TreeMap<Text, byte []>();
       for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
         Text k = getColumnName(i, ii);
-        try {
         columns.put(k, k.toString().getBytes(HConstants.UTF8_ENCODING));
-        } catch (UnsupportedEncodingException e) {
-          fail();
-        }
       }
       hmc.add(getRowName(i), columns, System.currentTimeMillis());
     }
@@ -98,8 +87,8 @@
   private HLog getLogfile() throws IOException {
     // Create a log file.
-    Path testDir = new Path(conf.get("hadoop.tmp.dir", System
-      .getProperty("java.tmp.dir")), "hbase");
+    Path testDir = new Path(conf.get("hadoop.tmp.dir",
+      System.getProperty("java.tmp.dir")), "hbase");
     Path logFile = new Path(testDir, this.getName());
     FileSystem fs = testDir.getFileSystem(conf);
     // Cleanup any old log file.
@@ -111,6 +100,7 @@
   private Snapshot runSnapshot(final HMemcache hmc, final HLog log)
   throws IOException {
+
     // Save off old state.
     int oldHistorySize = hmc.history.size();
     TreeMap<HStoreKey, byte []> oldMemcache = hmc.memcache;
@@ -151,12 +141,12 @@
     log.closeAndDelete();
   }
 
-  private void isExpectedRow(final int rowIndex,
-      TreeMap<Text, byte []> row) throws UnsupportedEncodingException {
+  private void isExpectedRow(final int rowIndex, TreeMap<Text, byte []> row)
+  throws UnsupportedEncodingException {
     int i = 0;
     for (Text colname: row.keySet()) {
-      String expectedColname =
-        getColumnName(rowIndex, i++).toString();
+      String expectedColname = getColumnName(rowIndex, i++).toString();
       String colnameStr = colname.toString();
       assertEquals("Column name", colnameStr, expectedColname);
       // Value is column name as bytes.  Usually result is
@@ -204,9 +194,7 @@
     assertEquals("Count of columns", COLUMNS_COUNT,
         results.size());
     TreeMap<Text, byte []> row = new TreeMap<Text, byte []>();
-    for(Iterator<Map.Entry<Text, byte []>> it = results.entrySet().iterator();
-        it.hasNext(); ) {
-      Map.Entry<Text, byte []> e = it.next();
+    for(Map.Entry<Text, byte []> e: results.entrySet() ) {
       row.put(e.getKey(), e.getValue());
     }
     isExpectedRow(i, row);

TestHRegion.java

@@ -587,7 +587,7 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListe
   }
 
   // NOTE: This test depends on testBatchWrite succeeding
-  void splitAndMerge() throws IOException {
+  private void splitAndMerge() throws IOException {
     Text midKey = new Text();
     if(region.needsSplit(midKey)) {
@@ -829,8 +829,10 @@
     } catch (IOException e) {
       e.printStackTrace();
     }
+    if (cluster != null) {
       cluster.shutdown();
       cluster = null;
+    }
 
     // Delete all the DFS files

TestMasterAdmin.java

@@ -38,18 +38,12 @@ public class TestMasterAdmin extends HBaseClusterTestCase {
     admin = null;
   }
 
-  /** the test */
-  public void testMasterAdmin() {
-    try {
+  /** @throws Exception */
+  public void testMasterAdmin() throws Exception {
     admin = new HBaseAdmin(conf);
     admin.createTable(testDesc);
     admin.disableTable(testDesc.getName());
-    } catch(Exception e) {
-      e.printStackTrace();
-      fail();
-    }
 
     try {
       try {
         @SuppressWarnings("unused")
@@ -76,13 +70,7 @@
         fail();
       }
     } finally {
-      try {
       admin.deleteTable(testDesc.getName());
-      } catch(Exception e) {
-        e.printStackTrace();
-        fail();
-      }
     }
   }
 }

TestMergeMeta.java

@@ -19,19 +19,17 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
 
 /** Tests region merging */
 public class TestMergeMeta extends AbstractMergeTestBase {
 
   /**
    * test case
+   * @throws IOException
    */
-  public void testMergeMeta() {
-    try {
+  public void testMergeMeta() throws IOException {
+    assertNotNull(dfsCluster);
     HMerge.merge(conf, fs, HConstants.META_TABLE_NAME);
-    } catch(Throwable t) {
-      t.printStackTrace();
-      fail();
-    }
   }
 }

TestMergeTable.java

@@ -31,6 +31,7 @@ public class TestMergeTable extends AbstractMergeTestBase {
    * @throws IOException
    */
   public void testMergeTable() throws IOException {
+    assertNotNull(dfsCluster);
     MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
     try {
       HMerge.merge(conf, fs, desc.getName());

TestMultipleUpdates.java

@@ -40,15 +40,9 @@ public class TestMultipleUpdates extends HBaseClusterTestCase {
     super.setUp();
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
-    try {
     HBaseAdmin admin = new HBaseAdmin(conf);
     admin.createTable(desc);
     table = new HTable(conf, desc.getName());
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
   }
 
   /** the test */

TestScanner.java

@@ -108,10 +108,6 @@ public class TestScanner extends HBaseTestCase {
         results.clear();
       }
 
-    } catch(IOException e) {
-      e.printStackTrace();
-      throw e;
     } finally {
       if(scanner != null) {
         scanner.close();
@@ -258,9 +254,6 @@
       region.close();
       log.closeAndDelete();
 
-    } catch(IOException e) {
-      e.printStackTrace();
-      throw e;
     } finally {
       if(cluster != null) {