diff --git a/CHANGES.txt b/CHANGES.txt
index a477b8fb870..93f68b217e1 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -31,6 +31,7 @@ Trunk (unreleased changes)
     HADOOP-1821 Replace all String.getBytes() with String.getBytes("UTF-8")
     HADOOP-1832 listTables() returns duplicate tables
     HADOOP-1834 Scanners ignore timestamp passed on creation
+    HADOOP-1847 Many HBase tests do not fail well.

   IMPROVEMENTS
     HADOOP-1737 Make HColumnDescriptor data publically members settable
@@ -39,6 +40,7 @@ Trunk (unreleased changes)
                 filter types
     HADOOP-1760 Use new MapWritable and SortedMapWritable classes from
                 org.apache.hadoop.io
+    HADOOP-1793 (Phase 1) Remove TestHClient
     HADOOP-1794 Remove deprecated APIs
     HADOOP-1802 Startup scripts should wait until hdfs as cleared 'safe mode'
     HADOOP-1835 Updated Documentation for HBase setup/installation
diff --git a/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java b/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
index 5e88b410ce5..820c918ba46 100644
--- a/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
+++ b/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase;

 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.util.Random;

 import org.apache.hadoop.dfs.MiniDFSCluster;
@@ -32,7 +31,7 @@ import org.apache.hadoop.io.Text;
 /** Abstract base class for merge tests */
 public abstract class AbstractMergeTestBase extends HBaseTestCase {
   protected static final Text COLUMN_NAME = new Text("contents:");
-  protected Random rand;
+  protected final Random rand = new Random();
   protected HTableDescriptor desc;
   protected ImmutableBytesWritable value;

@@ -46,7 +45,6 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    rand = new Random();
     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));

@@ -57,24 +55,12 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
     while(val.length() < 1024) {
       val.append(partialValue);
     }
-    try {
-      value = new ImmutableBytesWritable(val.toString().getBytes(HConstants.UTF8_ENCODING));
-
-    } catch(UnsupportedEncodingException e) {
-      fail();
-    }
+
+    value = new ImmutableBytesWritable(
+        val.toString().getBytes(HConstants.UTF8_ENCODING));

-    try {
-      dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-      fs = dfsCluster.getFileSystem();
-      dir = new Path("/hbase");
-      fs.mkdirs(dir);
+    dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);

-    } catch(Throwable t) {
-      t.printStackTrace();
-      fail();
-    }
-
     // We create three data regions: The first is too large to merge since it
     // will be > 64 MB in size. The second two will be smaller and will be
     // selected for merging.
@@ -83,6 +69,10 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
     // least 65536 rows. We will make certain by writing 70000
     try {
+      fs = dfsCluster.getFileSystem();
+      dir = new Path("/hbase");
+      fs.mkdirs(dir);
+
       Text row_70001 = new Text("row_70001");
       Text row_80001 = new Text("row_80001");

@@ -95,8 +85,11 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
       // Now create the root and meta regions and insert the data regions
       // created above into the meta

-      HRegion root = createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
-      HRegion meta = createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
+      HRegion root =
+        createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
+
+      HRegion meta =
+        createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);

       HRegion.addRegionToMETA(root, meta);

@@ -109,12 +102,11 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
       meta.close();
       meta.getLog().closeAndDelete();

-    } catch(Throwable t) {
-      t.printStackTrace();
+    } catch (Exception e) {
       if(dfsCluster != null) {
         dfsCluster.shutdown();
       }
-      fail();
+      throw e;
     }
   }

@@ -124,13 +116,16 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
   @Override
   public void tearDown() throws Exception {
     super.tearDown();
-    dfsCluster.shutdown();
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
   }

-  private HRegion createAregion(Text startKey, Text endKey, int firstRow, int nrows)
-  throws IOException {
+  private HRegion createAregion(Text startKey, Text endKey, int firstRow,
+      int nrows) throws IOException {

-    HRegion region = createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);
+    HRegion region =
+      createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);

     System.out.println("created region " + region.getRegionName());
diff --git a/src/test/org/apache/hadoop/hbase/HBaseTestCase.java b/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
index f764b1def45..9fd3d449e50 100644
--- a/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
+++ b/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -70,13 +70,9 @@ public abstract class HBaseTestCase extends TestCase {

   @Override
   protected void tearDown() throws Exception {
-    try {
-      if (this.localFs != null && this.testDir != null &&
-          this.localFs.exists(testDir)) {
-        this.localFs.delete(testDir);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
+    if (this.localFs != null && this.testDir != null &&
+        this.localFs.exists(testDir)) {
+      this.localFs.delete(testDir);
     }
     super.tearDown();
   }
diff --git a/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java b/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
index d1902f72ed9..a3e9a8ee5a1 100644
--- a/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -61,7 +61,8 @@ public class MiniHBaseCluster implements HConstants {
    * @throws IOException
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes)
-  throws IOException {
+    throws IOException {
+
     this(conf, nRegionNodes, true, true, true);
   }

@@ -76,6 +77,7 @@ public class MiniHBaseCluster implements HConstants {
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
       final boolean miniHdfsFilesystem) throws IOException {
+
     this(conf, nRegionNodes, miniHdfsFilesystem, true, true);
   }

@@ -88,8 +90,7 @@ public class MiniHBaseCluster implements HConstants {
    * @throws IOException
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
-    MiniDFSCluster dfsCluster)
-  throws IOException {
+    MiniDFSCluster dfsCluster) throws IOException {
     this.conf = conf;
     this.cluster = dfsCluster;
@@ -109,34 +110,24 @@
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
     final boolean miniHdfsFilesystem, boolean format, boolean deleteOnExit)
-  throws IOException {
+    throws IOException {
+
     this.conf = conf;
     this.deleteOnExit = deleteOnExit;
     if (miniHdfsFilesystem) {
-      try {
-        this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
-      } catch(Throwable t) {
-        LOG.error("Failed setup of mini dfs cluster", t);
-        t.printStackTrace();
-        return;
-      }
+      this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
     }
     init(nRegionNodes);
   }

-  private void init(final int nRegionNodes)
-  throws IOException {
+  private void init(final int nRegionNodes) throws IOException {
     try {
-      try {
-        this.fs = FileSystem.get(conf);
-        this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
-        fs.mkdirs(parentdir);
-      } catch(IOException e) {
-        LOG.error("Failed setup of FileSystem", e);
-        throw e;
-      }
+      this.fs = FileSystem.get(conf);
+      this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
+      fs.mkdirs(parentdir);
       this.masterThread = startMaster(this.conf);
       this.regionThreads = startRegionServers(this.conf, nRegionNodes);
+
     } catch(IOException e) {
       shutdown();
       throw e;
@@ -199,7 +190,8 @@ public class MiniHBaseCluster implements HConstants {
    * @see #shutdown(org.apache.hadoop.hbase.MiniHBaseCluster.MasterThread, List)
    */
   public static MasterThread startMaster(final Configuration c)
-  throws IOException {
+    throws IOException {
+
     if(c.get(MASTER_ADDRESS) == null) {
       c.set(MASTER_ADDRESS, "localhost:0");
     }
@@ -222,8 +214,8 @@ public class MiniHBaseCluster implements HConstants {
    * @see #startMaster(Configuration)
    */
   public static ArrayList startRegionServers(
-    final Configuration c, final int count)
-  throws IOException {
+    final Configuration c, final int count) throws IOException {
+
     // Start the HRegionServers.  Always have regionservers come up on
     // port '0' so there won't be clashes over default port as unit tests
     // start/stop ports at different times during the life of the test.
@@ -249,8 +241,8 @@
   }

   private static RegionServerThread startRegionServer(final Configuration c,
-    final int index)
-  throws IOException {
+    final int index) throws IOException {
+
     final HRegionServer hsr = new HRegionServer(c);
     RegionServerThread t = new RegionServerThread(hsr, index);
     t.start();
@@ -362,25 +354,32 @@
   }

   void shutdown() {
-    shutdown(this.masterThread, this.regionThreads);
-    // Close the file system.  Will complain if files open so helps w/ leaks.
+    MiniHBaseCluster.shutdown(this.masterThread, this.regionThreads);
+
     try {
-      if (this.cluster != null && this.cluster.getFileSystem() != null) {
-        this.cluster.getFileSystem().close();
+      if (cluster != null) {
+        FileSystem fs = cluster.getFileSystem();
+
+        LOG.info("Shutting down Mini DFS cluster");
+        cluster.shutdown();
+
+        if (fs != null) {
+          LOG.info("Shutting down FileSystem");
+          fs.close();
+        }
       }
+
     } catch (IOException e) {
-      LOG.error("Closing down dfs", e);
-    }
-    if(cluster != null) {
-      LOG.info("Shutting down Mini DFS cluster");
-      cluster.shutdown();
+      LOG.error("shutdown", e);
+
+    } finally {
+      // Delete all DFS files
+      if(deleteOnExit) {
+        deleteFile(new File(System.getProperty(
+            StaticTestEnvironment.TEST_DIRECTORY_KEY), "dfs"));
+      }
     }
-    // Delete all DFS files
-    if(deleteOnExit) {
-      deleteFile(new File(System.getProperty(
-        StaticTestEnvironment.TEST_DIRECTORY_KEY), "dfs"));
-    }
   }

   private void deleteFile(File f) {
diff --git a/src/test/org/apache/hadoop/hbase/MultiRegionTable.java b/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
index ce20946373a..df385ca3a29 100644
--- a/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
+++ b/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
@@ -34,11 +34,12 @@ public class MultiRegionTable extends HBaseTestCase {
    */
   public static void makeMultiRegionTable(Configuration conf,
     MiniHBaseCluster cluster, FileSystem localFs, String tableName,
-    String columnName)
-  throws IOException {
+    String columnName) throws IOException {
+
     // This size should make it so we always split using the addContent
     // below.  After adding all data, the first region is 1.3M. Should
     // set max filesize to be <= 1M.
+
     assertTrue(conf.getLong("hbase.hregion.max.filesize",
       HConstants.DEFAULT_MAX_FILE_SIZE) <= 1024 * 1024);

@@ -46,24 +47,33 @@
     Path d = cluster.regionThreads.get(0).getRegionServer().rootDir;
     FileSystem fs = (cluster.getDFSCluster() == null) ? localFs :
       cluster.getDFSCluster().getFileSystem();
-    assertTrue(fs != null);
+    assertNotNull(fs);

     // Get connection on the meta table and get count of rows.
+
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
     int count = count(meta, HConstants.COLUMN_FAMILY_STR);
     HTable t = new HTable(conf, new Text(tableName));
     addContent(new HTableLoader(t), columnName);
+
     // All is running in the one JVM so I should be able to get the single
     // region instance and bring on a split.
+
     HRegionInfo hri =
       t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
     HRegion r = cluster.regionThreads.get(0).getRegionServer().
       onlineRegions.get(hri.getRegionName());
+
     // Flush will provoke a split next time the split-checker thread runs.
+
     r.flushcache(false);
+
     // Now, wait until split makes it into the meta table.
-    for (int i = 0; i < retries &&
-      (count(meta, HConstants.COLUMN_FAMILY_STR) <= count); i++) {
+
+    for (int i = 0;
+        i < retries && (count(meta, HConstants.COLUMN_FAMILY_STR) <= count);
+        i++) {
+
       try {
         Thread.sleep(5000);
       } catch (InterruptedException e) {
@@ -75,9 +85,11 @@
     if (count <= oldCount) {
       throw new IOException("Failed waiting on splits to show up");
     }
+
     // Get info on the parent from the meta table.  Pass in 'hri'.  Its the
     // region we have been dealing with up to this.  Its the parent of the
     // region split.
+
     Map data = getSplitParentInfo(meta, hri);
     HRegionInfo parent =
       Writables.getHRegionInfoOrNull(data.get(HConstants.COL_REGIONINFO));
@@ -92,13 +104,19 @@
     LOG.info("Split happened. Parent is " + parent.getRegionName() +
       " and daughters are " + splitA.getRegionName() + ", " +
       splitB.getRegionName());
+
     // Recalibrate will cause us to wait on new regions' deployment
+
     recalibrate(t, new Text(columnName), retries);
+
     // Compact a region at a time so we can test case where one region has
     // no references but the other still has some
+
     compact(cluster, splitA);
+
     // Wait till the parent only has reference to remaining split, one that
     // still has references.
+
     while (getSplitParentInfo(meta, parent).size() == 3) {
       try {
         Thread.sleep(5000);
@@ -108,21 +126,28 @@
     }
     LOG.info("Parent split returned " +
       getSplitParentInfo(meta, parent).keySet().toString());
+
     // Call second split.
+
     compact(cluster, splitB);
+
     // Now wait until parent disappears.
-    LOG.info("Waiting on parent " + parent.getRegionName() +
-      " to disappear");
-    for (int i = 0; i < retries &&
-      getSplitParentInfo(meta, parent) != null; i++) {
+
+    LOG.info("Waiting on parent " + parent.getRegionName() + " to disappear");
+    for (int i = 0;
+        i < retries && getSplitParentInfo(meta, parent) != null;
+        i++) {
+
       try {
         Thread.sleep(5000);
       } catch (InterruptedException e) {
         // continue
       }
     }
-    assertTrue(getSplitParentInfo(meta, parent) == null);
+    assertNull(getSplitParentInfo(meta, parent));
+
     // Assert cleaned up.
+
     for (int i = 0; i < retries && fs.exists(parentDir); i++) {
       try {
         Thread.sleep(5000);
@@ -141,7 +166,8 @@
    * @throws IOException
    */
   private static int count(final HTable t, final String column)
-  throws IOException {
+    throws IOException {
+
     int size = 0;
     Text [] cols = new Text[] {new Text(column)};
     HScannerInterface s = t.obtainScanner(cols, HConstants.EMPTY_START_ROW,
@@ -162,29 +188,29 @@
    * @return Return row info for passed in region or null if not found in scan.
    */
   private static Map getSplitParentInfo(final HTable t,
-    final HRegionInfo parent)
-  throws IOException {
-    HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
+    final HRegionInfo parent) throws IOException {
+
+    HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
       HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
-      try {
-        HStoreKey curKey = new HStoreKey();
-        TreeMap curVals = new TreeMap();
-        while(s.next(curKey, curVals)) {
-          HRegionInfo hri = Writables.
-            getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
-          if (hri == null) {
-            continue;
-          }
-          if (hri.getRegionName().toString().
-            equals(parent.getRegionName().toString())) {
-            return curVals;
-          }
+    try {
+      HStoreKey curKey = new HStoreKey();
+      TreeMap curVals = new TreeMap();
+      while(s.next(curKey, curVals)) {
+        HRegionInfo hri = Writables.
+          getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
+        if (hri == null) {
+          continue;
         }
-        return null;
-      } finally {
-        s.close();
-      }
-    }
+        if (hri.getRegionName().toString().
+          equals(parent.getRegionName().toString())) {
+          return curVals;
+        }
+      }
+      return null;
+    } finally {
+      s.close();
+    }
+  }

   /*
    * Recalibrate passed in HTable. Run after change in region geography.
@@ -199,6 +225,7 @@ public class MultiRegionTable extends HBaseTestCase {
    */
   private static void recalibrate(final HTable t, final Text column,
       final int retries) throws IOException {
+
     for (int i = 0; i < retries; i++) {
       try {
         HScannerInterface s =
@@ -229,14 +256,15 @@
    * @throws IOException
    */
   private static void compact(final MiniHBaseCluster cluster,
-    final HRegionInfo r)
-  throws IOException {
+    final HRegionInfo r) throws IOException {
+
     LOG.info("Starting compaction");
     for (MiniHBaseCluster.RegionServerThread thread: cluster.regionThreads) {
-      SortedMap regions =
-        thread.getRegionServer().onlineRegions;
+      SortedMap regions = thread.getRegionServer().onlineRegions;
+
       // Retry if ConcurrentModification... alternative of sync'ing is not
       // worth it for sake of unit test.
+
       for (int i = 0; i < 10; i++) {
         try {
           for (HRegion online: regions.values()) {
diff --git a/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java b/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
index 0bf78a7a68e..04bfc0b0a8b 100644
--- a/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
+++ b/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase;

+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.Map;
 import java.util.TreeMap;
@@ -35,8 +36,9 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
   private HTableDescriptor desc = null;
   private HTable table = null;

-  /** constructor
-   * @throws UnsupportedEncodingException */
+  /**
+   * @throws UnsupportedEncodingException
+   */
   public TestBatchUpdate() throws UnsupportedEncodingException {
     value = "abcd".getBytes(HConstants.UTF8_ENCODING);
   }
@@ -49,19 +51,15 @@
     super.setUp();
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
-    try {
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.createTable(desc);
-      table = new HTable(conf, desc.getName());
-
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new HTable(conf, desc.getName());
   }

-  /** the test case */
-  public void testBatchUpdate() {
+  /**
+   * @throws IOException
+   */
+  public void testBatchUpdate() throws IOException {
     try {
       table.commit(-1L);

@@ -75,36 +73,31 @@
     long lockid = table.startUpdate(new Text("row1"));

     try {
-      try {
-        @SuppressWarnings("unused")
-        long dummy = table.startUpdate(new Text("row2"));
-      } catch (IllegalStateException e) {
-        // expected
-      } catch (Exception e) {
-        e.printStackTrace();
-        fail();
-      }
-      table.put(lockid, CONTENTS, value);
-      table.delete(lockid, CONTENTS);
-      table.commit(lockid);
-
-      lockid = table.startUpdate(new Text("row2"));
-      table.put(lockid, CONTENTS, value);
-      table.commit(lockid);
-
-      Text[] columns = { CONTENTS };
-      HScannerInterface scanner = table.obtainScanner(columns, new Text());
-      HStoreKey key = new HStoreKey();
-      TreeMap results = new TreeMap();
-      while(scanner.next(key, results)) {
-        for(Map.Entry e: results.entrySet()) {
-          System.out.println(key + ": row: " + e.getKey() + " value: " +
-            new String(e.getValue(), HConstants.UTF8_ENCODING));
-        }
-      }
+      @SuppressWarnings("unused")
+      long dummy = table.startUpdate(new Text("row2"));
+    } catch (IllegalStateException e) {
+      // expected
     } catch (Exception e) {
       e.printStackTrace();
       fail();
     }
+
+    table.put(lockid, CONTENTS, value);
+    table.delete(lockid, CONTENTS);
+    table.commit(lockid);
+
+    lockid = table.startUpdate(new Text("row2"));
+    table.put(lockid, CONTENTS, value);
+    table.commit(lockid);
+
+    Text[] columns = { CONTENTS };
+    HScannerInterface scanner = table.obtainScanner(columns, new Text());
+    HStoreKey key = new HStoreKey();
+    TreeMap results = new TreeMap();
+    while(scanner.next(key, results)) {
+      for(Map.Entry e: results.entrySet()) {
+        System.out.println(key + ": row: " + e.getKey() + " value: " +
+          new String(e.getValue(), HConstants.UTF8_ENCODING));
+      }
+    }
   }
 }
diff --git a/src/test/org/apache/hadoop/hbase/TestBloomFilters.java b/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
index eced4f85c49..f0d19ff6bf1 100644
--- a/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
+++ b/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase;

+import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
@@ -146,50 +147,50 @@
     conf.set("hbase.regionserver.maxlogentries", "90"); // and roll log too
   }

-  /** Test that specifies explicit parameters for the bloom filter */
-  public void testExplicitParameters() {
+  /**
+   * Test that specifies explicit parameters for the bloom filter
+   * @throws IOException
+   */
+  public void testExplicitParameters() throws IOException {
     HTable table = null;
-    try {
-      // Setup
-      HTableDescriptor desc = new HTableDescriptor(getName());
-      BloomFilterDescriptor bloomFilter =
-        new BloomFilterDescriptor(              // if we insert 1000 values
-          BloomFilterDescriptor.BloomFilterType.BLOOMFILTER,  // plain old bloom filter
-          12499,                                // number of bits
-          4                                     // number of hash functions
-        );
-
-      desc.addFamily(
-        new HColumnDescriptor(CONTENTS,         // Column name
-          1,                                    // Max versions
-          HColumnDescriptor.CompressionType.NONE, // no compression
-          HColumnDescriptor.DEFAULT_IN_MEMORY,  // not in memory
-          HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,
-          bloomFilter
-        )
+
+    // Setup
+
+    HTableDescriptor desc = new HTableDescriptor(getName());
+    BloomFilterDescriptor bloomFilter =
+      new BloomFilterDescriptor(                // if we insert 1000 values
+        BloomFilterDescriptor.BloomFilterType.BLOOMFILTER,  // plain old bloom filter
+        12499,                                  // number of bits
+        4                                       // number of hash functions
       );
-
-      // Create the table
-
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.createTable(desc);
-
-      // Open table
-
-      table = new HTable(conf, desc.getName());
-      // Store some values
+    desc.addFamily(
+      new HColumnDescriptor(CONTENTS,           // Column name
+        1,                                      // Max versions
+        HColumnDescriptor.CompressionType.NONE, // no compression
+        HColumnDescriptor.DEFAULT_IN_MEMORY,    // not in memory
+        HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,
+        bloomFilter
+      )
+    );

-      for(int i = 0; i < 100; i++) {
-        Text row = rows[i];
-        String value = row.toString();
-        long lockid = table.startUpdate(rows[i]);
-        table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
-        table.commit(lockid);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
+    // Create the table
+
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+
+    // Open table
+
+    table = new HTable(conf, desc.getName());
+
+    // Store some values
+
+    for(int i = 0; i < 100; i++) {
+      Text row = rows[i];
+      String value = row.toString();
+      long lockid = table.startUpdate(rows[i]);
+      table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+      table.commit(lockid);
     }
     try {
       // Give cache flusher and log roller a chance to run
@@ -201,67 +202,60 @@
     }

-    try {
-      if (table != null) {
-        for(int i = 0; i < testKeys.length; i++) {
-          byte[] value = table.get(testKeys[i], CONTENTS);
-          if(value != null && value.length != 0) {
-            LOG.info("non existant key: " + testKeys[i] + " returned value: " +
-              new String(value, HConstants.UTF8_ENCODING));
-          }
-        }
+    for(int i = 0; i < testKeys.length; i++) {
+      byte[] value = table.get(testKeys[i], CONTENTS);
+      if(value != null && value.length != 0) {
+        LOG.info("non existant key: " + testKeys[i] + " returned value: " +
+          new String(value, HConstants.UTF8_ENCODING));
       }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
     }
   }

-  /** Test that uses computed for the bloom filter */
-  public void testComputedParameters() {
+  /**
+   * Test that uses computed for the bloom filter
+   * @throws IOException
+   */
+  public void testComputedParameters() throws IOException {
     HTable table = null;
-    try {
-      // Setup
-      HTableDescriptor desc = new HTableDescriptor(getName());
+
+    // Setup
+
+    HTableDescriptor desc = new HTableDescriptor(getName());

-      BloomFilterDescriptor bloomFilter =
-        new BloomFilterDescriptor(
-          BloomFilterDescriptor.BloomFilterType.BLOOMFILTER,  // plain old bloom filter
-          1000                                  // estimated number of entries
-        );
-      LOG.info("vector size: " + bloomFilter.vectorSize);
-
-      desc.addFamily(
-        new HColumnDescriptor(CONTENTS,         // Column name
-          1,                                    // Max versions
-          HColumnDescriptor.CompressionType.NONE, // no compression
-          HColumnDescriptor.DEFAULT_IN_MEMORY,  // not in memory
-          HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,
-          bloomFilter
-        )
+    BloomFilterDescriptor bloomFilter =
+      new BloomFilterDescriptor(
+        BloomFilterDescriptor.BloomFilterType.BLOOMFILTER,  // plain old bloom filter
+        1000                                    // estimated number of entries
      );
-
-      // Create the table
-
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.createTable(desc);
-
-      // Open table
-
-      table = new HTable(conf, desc.getName());
+    LOG.info("vector size: " + bloomFilter.vectorSize);

-      // Store some values
+    desc.addFamily(
+      new HColumnDescriptor(CONTENTS,           // Column name
+        1,                                      // Max versions
+        HColumnDescriptor.CompressionType.NONE, // no compression
+        HColumnDescriptor.DEFAULT_IN_MEMORY,    // not in memory
+        HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,
+        bloomFilter
+      )
+    );

-      for(int i = 0; i < 100; i++) {
-        Text row = rows[i];
-        String value = row.toString();
-        long lockid = table.startUpdate(rows[i]);
-        table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
-        table.commit(lockid);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
+    // Create the table
+
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+
+    // Open table
+
+    table = new HTable(conf, desc.getName());
+
+    // Store some values
+
+    for(int i = 0; i < 100; i++) {
+      Text row = rows[i];
+      String value = row.toString();
+      long lockid = table.startUpdate(rows[i]);
+      table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+      table.commit(lockid);
     }
     try {
       // Give cache flusher and log roller a chance to run
@@ -272,19 +266,12 @@
       // ignore
     }

-    try {
-      if (table != null) {
-        for(int i = 0; i < testKeys.length; i++) {
-          byte[] value = table.get(testKeys[i], CONTENTS);
-          if(value != null && value.length != 0) {
-            LOG.info("non existant key: " + testKeys[i] + " returned value: " +
-              new String(value, HConstants.UTF8_ENCODING));
-          }
-        }
+    for(int i = 0; i < testKeys.length; i++) {
+      byte[] value = table.get(testKeys[i], CONTENTS);
+      if(value != null && value.length != 0) {
+        LOG.info("non existant key: " + testKeys[i] + " returned value: " +
+          new String(value, HConstants.UTF8_ENCODING));
       }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
     }
   }
 }
diff --git a/src/test/org/apache/hadoop/hbase/TestCompaction.java b/src/test/org/apache/hadoop/hbase/TestCompaction.java
index 520bc0b6408..0f53a67b5d0 100644
--- a/src/test/org/apache/hadoop/hbase/TestCompaction.java
+++ b/src/test/org/apache/hadoop/hbase/TestCompaction.java
@@ -30,13 +30,15 @@ import org.apache.commons.logging.LogFactory;
 public class TestCompaction extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());

+  /** {@inheritDoc} */
   @Override
-  protected void setUp() throws Exception {
+  public void setUp() throws Exception {
     super.setUp();
   }

+  /** {@inheritDoc} */
   @Override
-  protected void tearDown() throws Exception {
+  public void tearDown() throws Exception {
     super.tearDown();
   }
diff --git a/src/test/org/apache/hadoop/hbase/TestGet.java b/src/test/org/apache/hadoop/hbase/TestGet.java
index dfc5e522c4f..2e5e298f590 100644
--- a/src/test/org/apache/hadoop/hbase/TestGet.java
+++ b/src/test/org/apache/hadoop/hbase/TestGet.java
@@ -172,10 +172,6 @@ public class TestGet extends HBaseTestCase {
       r.close();
       log.closeAndDelete();

-    } catch(IOException e) {
-      e.printStackTrace();
-      throw e;
-
     } finally {
       if(cluster != null) {
         cluster.shutdown();
diff --git a/src/test/org/apache/hadoop/hbase/TestHClient.java b/src/test/org/apache/hadoop/hbase/TestHClient.java
deleted file mode 100644
index c51108bd7de..00000000000
--- a/src/test/org/apache/hadoop/hbase/TestHClient.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Test HClient.
- */
-@Deprecated
-public class TestHClient extends HBaseClusterTestCase {
-  private Log LOG = LogFactory.getLog(this.getClass().getName());
-  private HClient client;
-
-  /** {@inheritDoc} */
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    this.client = new HClient(this.conf);
-  }
-
-  /** the test
-   * @throws Exception
-   */
-  public void testCommandline() throws Exception {
-    final String m = "--master=" + this.conf.get(HConstants.MASTER_ADDRESS);
-    LOG.info("Creating table");
-    // Assert each of below returns 0: i.e. success.
- assertEquals("create table", 0, - this.client.doCommandLine( - new String [] {m, "createTable", getName(), "family:", "1"})); - assertEquals("list tables", 0, - this.client.doCommandLine(new String [] {m, "listTables"})); - assertEquals("delete table", 0, - this.client.doCommandLine(new String [] {m, "deleteTable", getName()})); - } -} \ No newline at end of file diff --git a/src/test/org/apache/hadoop/hbase/TestHLog.java b/src/test/org/apache/hadoop/hbase/TestHLog.java index 46d8467d0e1..2fb542954c1 100644 --- a/src/test/org/apache/hadoop/hbase/TestHLog.java +++ b/src/test/org/apache/hadoop/hbase/TestHLog.java @@ -37,70 +37,67 @@ public class TestHLog extends HBaseTestCase implements HConstants { super.setUp(); } - /** The test */ - public void testAppend() { + /** + * @throws IOException + */ + public void testAppend() throws IOException { + Path dir = getUnitTestdir(getName()); + FileSystem fs = FileSystem.get(this.conf); + if (fs.exists(dir)) { + fs.delete(dir); + } + final int COL_COUNT = 10; + final Text regionName = new Text("regionname"); + final Text tableName = new Text("tablename"); + final Text row = new Text("row"); + Reader reader = null; + HLog log = new HLog(fs, dir, this.conf); try { - Path dir = getUnitTestdir(getName()); - FileSystem fs = FileSystem.get(this.conf); + // Write columns named 1, 2, 3, etc. and then values of single byte + // 1, 2, 3... + TreeMap cols = new TreeMap(); + for (int i = 0; i < COL_COUNT; i++) { + cols.put(new Text(Integer.toString(i)), + new byte[] { (byte)(i + '0') }); + } + long timestamp = System.currentTimeMillis(); + log.append(regionName, tableName, row, cols, timestamp); + long logSeqId = log.startCacheFlush(); + log.completeCacheFlush(regionName, tableName, logSeqId); + log.close(); + Path filename = log.computeFilename(log.filenum - 1); + log = null; + // Now open a reader on the log and assert append worked. + reader = new SequenceFile.Reader(fs, filename, conf); + HLogKey key = new HLogKey(); + HLogEdit val = new HLogEdit(); + for (int i = 0; i < COL_COUNT; i++) { + reader.next(key, val); + assertEquals(regionName, key.getRegionName()); + assertEquals(tableName, key.getTablename()); + assertEquals(row, key.getRow()); + assertEquals((byte)(i + '0'), val.getVal()[0]); + System.out.println(key + " " + val); + } + while (reader.next(key, val)) { + // Assert only one more row... the meta flushed row. + assertEquals(regionName, key.getRegionName()); + assertEquals(tableName, key.getTablename()); + assertEquals(HLog.METAROW, key.getRow()); + assertEquals(HLog.METACOLUMN, val.getColumn()); + assertEquals(0, HGlobals.completeCacheFlush.compareTo(val.getVal())); + System.out.println(key + " " + val); + } + } finally { + if (log != null) { + log.closeAndDelete(); + } + if (reader != null) { + reader.close(); + } if (fs.exists(dir)) { fs.delete(dir); } - final int COL_COUNT = 10; - final Text regionName = new Text("regionname"); - final Text tableName = new Text("tablename"); - final Text row = new Text("row"); - Reader reader = null; - HLog log = new HLog(fs, dir, this.conf); - try { - // Write columns named 1, 2, 3, etc. and then values of single byte - // 1, 2, 3... 
-        TreeMap cols = new TreeMap();
-        for (int i = 0; i < COL_COUNT; i++) {
-          cols.put(new Text(Integer.toString(i)),
-            new byte[] { (byte)(i + '0') });
-        }
-        long timestamp = System.currentTimeMillis();
-        log.append(regionName, tableName, row, cols, timestamp);
-        long logSeqId = log.startCacheFlush();
-        log.completeCacheFlush(regionName, tableName, logSeqId);
-        log.close();
-        Path filename = log.computeFilename(log.filenum - 1);
-        log = null;
-        // Now open a reader on the log and assert append worked.
-        reader = new SequenceFile.Reader(fs, filename, conf);
-        HLogKey key = new HLogKey();
-        HLogEdit val = new HLogEdit();
-        for (int i = 0; i < COL_COUNT; i++) {
-          reader.next(key, val);
-          assertEquals(regionName, key.getRegionName());
-          assertEquals(tableName, key.getTablename());
-          assertEquals(row, key.getRow());
-          assertEquals((byte)(i + '0'), val.getVal()[0]);
-          System.out.println(key + " " + val);
-        }
-        while (reader.next(key, val)) {
-          // Assert only one more row... the meta flushed row.
-          assertEquals(regionName, key.getRegionName());
-          assertEquals(tableName, key.getTablename());
-          assertEquals(HLog.METAROW, key.getRow());
-          assertEquals(HLog.METACOLUMN, val.getColumn());
-          assertEquals(0, HGlobals.completeCacheFlush.compareTo(val.getVal()));
-          System.out.println(key + " " + val);
-        }
-      } finally {
-        if (log != null) {
-          log.closeAndDelete();
-        }
-        if (reader != null) {
-          reader.close();
-        }
-        if (fs.exists(dir)) {
-          fs.delete(dir);
-        }
-      }
-    } catch(IOException e) {
-      e.printStackTrace();
-      fail();
     }
   }
diff --git a/src/test/org/apache/hadoop/hbase/TestHMemcache.java b/src/test/org/apache/hadoop/hbase/TestHMemcache.java
index 61e7a6ffc0d..23a78954ad5 100644
--- a/src/test/org/apache/hadoop/hbase/TestHMemcache.java
+++ b/src/test/org/apache/hadoop/hbase/TestHMemcache.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase;

 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;

@@ -46,11 +45,9 @@ public class TestHMemcache extends TestCase {

   private static final String COLUMN_FAMILY = "column";

-  /* (non-Javadoc)
-   * @see junit.framework.TestCase#setUp()
-   */
+  /** {@inheritDoc} */
   @Override
-  protected void setUp() throws Exception {
+  public void setUp() throws Exception {
     super.setUp();
     this.hmemcache = new HMemcache();
     // Set up a configuration that has configuration for a file
@@ -58,11 +55,9 @@
     this.conf = new HBaseConfiguration();
   }

-  /* (non-Javadoc)
-   * @see junit.framework.TestCase#tearDown()
-   */
+  /** {@inheritDoc} */
   @Override
-  protected void tearDown() throws Exception {
+  public void tearDown() throws Exception {
     super.tearDown();
   }

@@ -70,10 +65,8 @@
     return new Text("row" + Integer.toString(index));
   }

-  private Text getColumnName(final int rowIndex,
-      final int colIndex) {
-    return new Text(COLUMN_FAMILY + ":" +
-      Integer.toString(rowIndex) + ";" +
+  private Text getColumnName(final int rowIndex, final int colIndex) {
+    return new Text(COLUMN_FAMILY + ":" + Integer.toString(rowIndex) + ";" +
       Integer.toString(colIndex));
   }

@@ -81,16 +74,12 @@
   * Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT}
   * @param hmc Instance to add rows to.
   */
-  private void addRows(final HMemcache hmc) {
+  private void addRows(final HMemcache hmc) throws UnsupportedEncodingException {
    for (int i = 0; i < ROW_COUNT; i++) {
      TreeMap columns = new TreeMap();
      for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
        Text k = getColumnName(i, ii);
-        try {
-          columns.put(k, k.toString().getBytes(HConstants.UTF8_ENCODING));
-        } catch (UnsupportedEncodingException e) {
-          fail();
-        }
+        columns.put(k, k.toString().getBytes(HConstants.UTF8_ENCODING));
      }
      hmc.add(getRowName(i), columns, System.currentTimeMillis());
    }
@@ -98,8 +87,8 @@

   private HLog getLogfile() throws IOException {
     // Create a log file.
-    Path testDir = new Path(conf.get("hadoop.tmp.dir", System
-        .getProperty("java.tmp.dir")), "hbase");
+    Path testDir = new Path(conf.get("hadoop.tmp.dir",
+        System.getProperty("java.tmp.dir")), "hbase");
     Path logFile = new Path(testDir, this.getName());
     FileSystem fs = testDir.getFileSystem(conf);
     // Cleanup any old log file.
@@ -110,7 +99,8 @@
   }

   private Snapshot runSnapshot(final HMemcache hmc, final HLog log)
-  throws IOException {
+    throws IOException {
+
     // Save off old state.
     int oldHistorySize = hmc.history.size();
     TreeMap oldMemcache = hmc.memcache;
@@ -151,12 +141,12 @@
     log.closeAndDelete();
   }

-  private void isExpectedRow(final int rowIndex,
-      TreeMap row) throws UnsupportedEncodingException {
+  private void isExpectedRow(final int rowIndex, TreeMap row)
+    throws UnsupportedEncodingException {
+
     int i = 0;
     for (Text colname: row.keySet()) {
-      String expectedColname =
-        getColumnName(rowIndex, i++).toString();
+      String expectedColname = getColumnName(rowIndex, i++).toString();
       String colnameStr = colname.toString();
       assertEquals("Column name", colnameStr, expectedColname);
       // Value is column name as bytes.  Usually result is
@@ -204,9 +194,7 @@
       assertEquals("Count of columns", COLUMNS_COUNT, results.size());
       TreeMap row = new TreeMap();
-      for(Iterator> it = results.entrySet().iterator();
-          it.hasNext(); ) {
-        Map.Entry e = it.next();
+      for(Map.Entry e: results.entrySet() ) {
         row.put(e.getKey(), e.getValue());
       }
       isExpectedRow(i, row);
diff --git a/src/test/org/apache/hadoop/hbase/TestHRegion.java b/src/test/org/apache/hadoop/hbase/TestHRegion.java
index e6e1204aedf..177b25b226c 100644
--- a/src/test/org/apache/hadoop/hbase/TestHRegion.java
+++ b/src/test/org/apache/hadoop/hbase/TestHRegion.java
@@ -587,7 +587,7 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListe
   }

   // NOTE: This test depends on testBatchWrite succeeding
-  void splitAndMerge() throws IOException {
+  private void splitAndMerge() throws IOException {
     Text midKey = new Text();

     if(region.needsSplit(midKey)) {
@@ -829,8 +829,10 @@
     } catch (IOException e) {
       e.printStackTrace();
     }
-    cluster.shutdown();
-    cluster = null;
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }

     // Delete all the DFS files

diff --git a/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java b/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
index 7846bb0f6aa..27d25377f69 100644
--- a/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
+++ b/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
@@ -38,17 +38,11 @@ public class TestMasterAdmin extends HBaseClusterTestCase {
     admin = null;
   }

-  /** the test */
-  public void testMasterAdmin() {
-    try {
-      admin = new HBaseAdmin(conf);
-      admin.createTable(testDesc);
-      admin.disableTable(testDesc.getName());
-
-    } catch(Exception e) {
-      e.printStackTrace();
-      fail();
-    }
+  /** @throws Exception */
+  public void testMasterAdmin() throws Exception {
+    admin = new HBaseAdmin(conf);
+    admin.createTable(testDesc);
+    admin.disableTable(testDesc.getName());

     try {
       try {
@@ -76,13 +70,7 @@
         fail();

       } finally {
-        try {
-          admin.deleteTable(testDesc.getName());
-
-        } catch(Exception e) {
-          e.printStackTrace();
-          fail();
-        }
+        admin.deleteTable(testDesc.getName());
       }
     }
   }
diff --git a/src/test/org/apache/hadoop/hbase/TestMergeMeta.java b/src/test/org/apache/hadoop/hbase/TestMergeMeta.java
index f5e10ef86e2..21bae460f77 100644
--- a/src/test/org/apache/hadoop/hbase/TestMergeMeta.java
+++ b/src/test/org/apache/hadoop/hbase/TestMergeMeta.java
@@ -19,19 +19,17 @@
  */
 package org.apache.hadoop.hbase;

+import java.io.IOException;
+
 /** Tests region merging */
 public class TestMergeMeta extends AbstractMergeTestBase {

   /**
    * test case
+   * @throws IOException
    */
-  public void testMergeMeta() {
-    try {
-      HMerge.merge(conf, fs, HConstants.META_TABLE_NAME);
-
-    } catch(Throwable t) {
-      t.printStackTrace();
-      fail();
-    }
+  public void testMergeMeta() throws IOException {
+    assertNotNull(dfsCluster);
+    HMerge.merge(conf, fs, HConstants.META_TABLE_NAME);
   }
 }
diff --git a/src/test/org/apache/hadoop/hbase/TestMergeTable.java b/src/test/org/apache/hadoop/hbase/TestMergeTable.java
index a7ad513f758..9cf316246d5 100644
--- a/src/test/org/apache/hadoop/hbase/TestMergeTable.java
+++ b/src/test/org/apache/hadoop/hbase/TestMergeTable.java
@@ -31,6 +31,7 @@ public class TestMergeTable extends AbstractMergeTestBase {
    * @throws IOException
    */
   public void testMergeTable() throws IOException {
+    assertNotNull(dfsCluster);
     MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
     try {
       HMerge.merge(conf, fs, desc.getName());
diff --git a/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java b/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
index dfed6b4a26d..fc1331e9a25 100644
--- a/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
+++ b/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
@@ -40,15 +40,9 @@ public class TestMultipleUpdates extends HBaseClusterTestCase {
     super.setUp();
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
-    try {
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.createTable(desc);
-      table = new HTable(conf, desc.getName());
-
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new HTable(conf, desc.getName());
   }

   /** the test */
diff --git a/src/test/org/apache/hadoop/hbase/TestScanner.java b/src/test/org/apache/hadoop/hbase/TestScanner.java
index dde52a4a3de..c6867f31bd1 100644
--- a/src/test/org/apache/hadoop/hbase/TestScanner.java
+++ b/src/test/org/apache/hadoop/hbase/TestScanner.java
@@ -108,10 +108,6 @@ public class TestScanner extends HBaseTestCase {
         results.clear();
       }

-    } catch(IOException e) {
-      e.printStackTrace();
-      throw e;
-
     } finally {
       if(scanner != null) {
         scanner.close();
@@ -258,9 +254,6 @@
       region.close();
       log.closeAndDelete();

-    } catch(IOException e) {
-      e.printStackTrace();
-      throw e;
     } finally {
       if(cluster != null) {