diff --git a/CHANGES.txt b/CHANGES.txt index bee4b72f8c5..01b6136034a 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -700,7 +700,7 @@ Release 0.92.0 - Unreleased HBASE-4694 Some cleanup of log messages in RS and M HBASE-4603 Uneeded sleep time for tests in hbase.master.ServerManager#waitForRegionServers (nkeywal) - + HBASE-4703 Improvements in tests (nkeywal) TASKS HBASE-3559 Move report of split to master OFF the heartbeat channel diff --git a/pom.xml b/pom.xml index ee1abcc62c6..7ef1e100d21 100644 --- a/pom.xml +++ b/pom.xml @@ -770,7 +770,6 @@ 6.1.14 1.4 1.6.0 - 1.1.1 4.8.2 1.2.16 1.8.5 @@ -1035,11 +1034,6 @@ jersey-server ${jersey.version} - - javax.ws.rs - jsr311-api - ${jsr311.version} - javax.xml.bind jaxb-api diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 60af1f7558c..f870ab22ebe 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1286,12 +1286,14 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, new HDFSBlocksDistribution(); long totalStaticIndexSize = 0; long totalStaticBloomSize = 0; + long totalMslabWaste = 0; long tmpfiles; long tmpindex; long tmpfilesize; long tmpbloomsize; long tmpstaticsize; + long tmpMslabWaste; String cfname; // Note that this is a map of Doubles instead of Longs. This is because we @@ -1315,6 +1317,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, tmpfilesize = store.getStorefilesSize(); tmpbloomsize = store.getTotalStaticBloomSize(); tmpstaticsize = store.getTotalStaticIndexSize(); + tmpMslabWaste = store.memstore.getMslabWaste(); // Note that there is only one store per CF so setting is safe cfname = "cf." + store.toString(); @@ -1329,11 +1332,14 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, (store.getMemStoreSize() / (1024.0 * 1024))); this.incrMap(tempVals, cfname + ".staticIndexSizeKB", tmpstaticsize / 1024.0); + this.incrMap(tempVals, cfname + ".mslabWasteKB", + tmpMslabWaste / 1024.0); storefiles += tmpfiles; storefileIndexSize += tmpindex; totalStaticIndexSize += tmpstaticsize; totalStaticBloomSize += tmpbloomsize; + totalMslabWaste += tmpMslabWaste; } } @@ -1353,6 +1359,8 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, (int) (totalStaticIndexSize / 1024)); this.metrics.totalStaticBloomSizeKB.set( (int) (totalStaticBloomSize / 1024)); + this.metrics.totalMslabWasteKB.set( + (int) (totalMslabWaste / 1024)); this.metrics.readRequestsCount.set(readRequestsCount); this.metrics.writeRequestsCount.set(writeRequestsCount); diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 747a90b783d..48e1acc86be 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -124,6 +124,18 @@ public class MemStore implements HeapSize { } } + /** + * @return the number of bytes "wasted" by external fragmentation + * in the MSLAB, if configured. 
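+   * Returns 0 when the MSLAB is disabled
+   * (hbase.hregion.memstore.mslab.enabled = false).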
+ */ + long getMslabWaste() { + if (allocator != null) { + return allocator.getWastedBytes(); + } else { + return 0; + } + } + void dump() { for (KeyValue kv: this.kvset) { LOG.info(kv); diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java b/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java index cbb76e8be09..d5d1fd30fc0 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; @@ -56,7 +57,9 @@ public class MemStoreLAB { final static String MAX_ALLOC_KEY = "hbase.hregion.memstore.mslab.max.allocation"; final static int MAX_ALLOC_DEFAULT = 256 * 1024; // allocs bigger than this don't go through allocator final int maxAlloc; - + + private final AtomicLong wastedSpace = new AtomicLong(); + public MemStoreLAB() { this(new Configuration()); } @@ -103,22 +106,35 @@ public class MemStoreLAB { } } + public long getWastedBytes() { + Chunk cur = curChunk.get(); + long ret = wastedSpace.get(); + if (cur != null) { + ret += cur.getFreeSpace(); + } + return ret; + } + /** * Try to retire the current chunk if it is still * c. Postcondition is that curChunk.get() * != c */ private void tryRetireChunk(Chunk c) { - @SuppressWarnings("unused") boolean weRetiredIt = curChunk.compareAndSet(c, null); // If the CAS succeeds, that means that we won the race - // to retire the chunk. We could use this opportunity to - // update metrics on external fragmentation. - // + // to retire the chunk. // If the CAS fails, that means that someone else already // retired the chunk for us. + if (weRetiredIt) { + // This isn't quite right, since another thread may + // have a small allocation concurrently with our retiring + // the chunk. But it should be very close to right, + // and this is just for metrics. + wastedSpace.addAndGet(c.getFreeSpace()); + } } - + /** * Get the current chunk, or, if there is no current chunk, * allocate a new one from the JVM. 
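The two MemStoreLAB hunks above implement a lock-free waste counter: whichever thread wins the compareAndSet that retires a full chunk folds that chunk's unused tail into an AtomicLong, and the getter adds the free tail of the chunk still being filled. Below is a condensed, self-contained sketch of the scheme for reviewers reading the patch out of context; the names follow the patch, but this Chunk is a minimal stand-in rather than the real MemStoreLAB.Chunk:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.atomic.AtomicReference;

    public class WasteTrackingLab {

        static class Chunk {
            final byte[] data = new byte[2 * 1024 * 1024]; // default MSLAB chunk size
            // Next free byte in data; stays -1 until the chunk is
            // initialized, matching MemStoreLAB.Chunk.
            final AtomicInteger nextFreeOffset = new AtomicInteger(-1);

            int getFreeSpace() {
                int off = nextFreeOffset.get();
                // An uninitialized chunk has no measurable waste.
                return off >= 0 ? data.length - off : 0;
            }
        }

        private final AtomicReference<Chunk> curChunk =
            new AtomicReference<Chunk>();
        private final AtomicLong wastedSpace = new AtomicLong();

        // Called from the allocation path once chunk c is too full to
        // serve a request. Only the CAS winner folds c's unused tail into
        // the counter, so every retired chunk is counted exactly once.
        // A racing small allocation can still land in c after the count,
        // so the value is approximate, which is fine for a metric.
        void tryRetireChunk(Chunk c) {
            if (curChunk.compareAndSet(c, null)) {
                wastedSpace.addAndGet(c.getFreeSpace());
            }
        }

        // Waste from retired chunks plus the free tail of the chunk
        // currently being filled.
        long getWastedBytes() {
            Chunk cur = curChunk.get();
            long ret = wastedSpace.get();
            return cur == null ? ret : ret + cur.getFreeSpace();
        }
    }
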
@@ -239,6 +255,15 @@ public class MemStoreLAB { " allocs=" + allocCount.get() + "waste=" + (data.length - nextFreeOffset.get()); } + + private int getFreeSpace() { + int off = nextFreeOffset.get(); + if (off >= 0) { + return data.length - off; + } else { + return 0; + } + } } /** diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java b/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java index d512686001a..03afaae8e6a 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java @@ -155,6 +155,9 @@ public class RegionServerMetrics implements Updater { public final MetricsIntValue totalStaticBloomSizeKB = new MetricsIntValue("totalStaticBloomSizeKB", registry); + /** Total amount of memory wasted by external fragmentation in MSLABs */ + public final MetricsIntValue totalMslabWasteKB = + new MetricsIntValue("totalMslabWasteKB", registry); /** * HDFS blocks locality index */ diff --git a/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 20569ab16d8..0a6f703be46 100644 --- a/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -251,13 +251,12 @@ public class HBaseTestingUtility { } String randomStr = UUID.randomUUID().toString(); - Path testDir= new Path( + Path testPath= new Path( getBaseTestDir(), randomStr ); - dataTestDir = new File(testDir.toString()).getAbsoluteFile(); - // Have it cleaned up on exit + dataTestDir = new File(testPath.toString()).getAbsoluteFile(); dataTestDir.deleteOnExit(); } @@ -400,7 +399,6 @@ public class HBaseTestingUtility { throws Exception { File zkClusterFile = new File(getClusterTestDir().toString()); return startMiniZKCluster(zkClusterFile, zooKeeperServerNum); - } private MiniZooKeeperCluster startMiniZKCluster(final File dir) @@ -1389,7 +1387,7 @@ public class HBaseTestingUtility { while (!admin.isTableAvailable(table)) { assertTrue("Timed out waiting for table " + Bytes.toStringBinary(table), System.currentTimeMillis() - startWait < timeoutMillis); - Thread.sleep(500); + Thread.sleep(200); } } @@ -1402,13 +1400,14 @@ public class HBaseTestingUtility { */ public boolean ensureSomeRegionServersAvailable(final int num) throws IOException { - if (this.getHBaseCluster().getLiveRegionServerThreads().size() < num) { - // Need at least "num" servers. - LOG.info("Started new server=" + - this.getHBaseCluster().startRegionServer()); - return true; + boolean startedServer = false; + + for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i expectedContents = cacheOnWrite ? 
new ArrayList() : null; long totalSize = writeBlocks(rand, algo, path, expectedOffsets, - expectedPrevOffsets, expectedTypes, expectedContents, true); + expectedPrevOffsets, expectedTypes, expectedContents); FSDataInputStream is = fs.open(path); HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo, @@ -263,10 +264,13 @@ public class TestHFileBlock { } assertEquals(expectedOffsets.get(i).longValue(), curOffset); - - LOG.info("Reading block #" + i + " at offset " + curOffset); + if (detailedLogging) { + LOG.info("Reading block #" + i + " at offset " + curOffset); + } HFileBlock b = hbr.readBlockData(curOffset, -1, -1, pread); - LOG.info("Block #" + i + ": " + b); + if (detailedLogging) { + LOG.info("Block #" + i + ": " + b); + } assertEquals("Invalid block #" + i + "'s type:", expectedTypes.get(i), b.getBlockType()); assertEquals("Invalid previous block offset for block " + i @@ -388,8 +392,9 @@ public class TestHFileBlock { ++numWithOnDiskSize; } LOG.info("Client " + clientId + " successfully read " + numBlocksRead + - " blocks (with pread: " + numPositionalRead + ", with onDiskSize " + - "specified: " + numWithOnDiskSize + ")"); + " blocks (with pread: " + numPositionalRead + ", with onDiskSize " + + "specified: " + numWithOnDiskSize + ")"); + return true; } @@ -403,7 +408,7 @@ public class TestHFileBlock { Random rand = defaultRandom(); List offsets = new ArrayList(); List types = new ArrayList(); - writeBlocks(rand, compressAlgo, path, offsets, null, types, null, false); + writeBlocks(rand, compressAlgo, path, offsets, null, types, null); FSDataInputStream is = fs.open(path); long fileSize = fs.getFileStatus(path).getLen(); HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, compressAlgo, @@ -421,9 +426,11 @@ public class TestHFileBlock { for (int i = 0; i < NUM_READER_THREADS; ++i) { Future result = ecs.take(); assertTrue(result.get()); - LOG.info(String.valueOf(i + 1) + if (detailedLogging) { + LOG.info(String.valueOf(i + 1) + " reader threads finished successfully (algo=" + compressAlgo + ")"); + } } is.close(); @@ -432,8 +439,8 @@ public class TestHFileBlock { private long writeBlocks(Random rand, Compression.Algorithm compressAlgo, Path path, List expectedOffsets, List expectedPrevOffsets, - List expectedTypes, List expectedContents, - boolean detailedLogging) throws IOException { + List expectedTypes, List expectedContents + ) throws IOException { boolean cacheOnWrite = expectedContents != null; FSDataOutputStream os = fs.create(path); HFileBlock.Writer hbw = new HFileBlock.Writer(compressAlgo); diff --git a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 7357d4e4a55..8e67ed57713 100644 --- a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -53,9 +53,8 @@ public class TestLruBlockCache extends TestCase { // Let the eviction run int n = 0; while(cache.getEvictionCount() == 0) { - System.out.println("sleep"); - Thread.sleep(1000); - assertTrue(n++ < 2); + Thread.sleep(200); + assertTrue(n++ < 10); } System.out.println("Background Evictions run: " + cache.getEvictionCount()); diff --git a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index 19bb0abb2b2..f2f32bb2c27 100644 --- a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ 
b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -73,7 +73,6 @@ public class TestReseekTo { String value = valueList.get(i); long start = System.nanoTime(); scanner.seekTo(Bytes.toBytes(key)); - System.out.println("Seek Finished in: " + (System.nanoTime() - start)/1000 + " micro s"); assertEquals(value, scanner.getValueString()); } @@ -83,7 +82,6 @@ public class TestReseekTo { String value = valueList.get(i); long start = System.nanoTime(); scanner.reseekTo(Bytes.toBytes(key)); - System.out.println("Reseek Finished in: " + (System.nanoTime() - start)/1000 + " micro s"); assertEquals(value, scanner.getValueString()); } } diff --git a/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java index 558b3edf74e..09ad4685493 100644 --- a/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java +++ b/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java @@ -67,12 +67,12 @@ public class TestTableInputFormat { @BeforeClass public static void beforeClass() throws Exception { - UTIL.startMiniCluster(1); + UTIL.startMiniCluster(); } @AfterClass - public static void afterClass() throws IOException { - UTIL.getMiniHBaseCluster().shutdown(); + public static void afterClass() throws Exception { + UTIL.shutdownMiniCluster(); } @Before diff --git a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index 72dca7f13e9..a277649d578 100644 --- a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -35,6 +35,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Random; +import junit.framework.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -410,7 +411,7 @@ public class TestHFileOutputFormat { admin.disableTable(table.getTableName()); while(util.getMiniHBaseCluster().getMaster().getAssignmentManager(). 
isRegionsInTransition()) { - Threads.sleep(1000); + Threads.sleep(200); LOG.info("Waiting on table to finish disabling"); } byte[][] newStartKeys = generateRandomStartKeys(15); @@ -419,7 +420,7 @@ public class TestHFileOutputFormat { admin.enableTable(table.getTableName()); while (table.getRegionsInfo().size() != 15 || !admin.isTableAvailable(table.getTableName())) { - Thread.sleep(1000); + Thread.sleep(200); LOG.info("Waiting for new region assignment to happen"); } } @@ -449,7 +450,7 @@ public class TestHFileOutputFormat { // Cause regions to reopen admin.disableTable(TABLE_NAME); while (!admin.isTableDisabled(TABLE_NAME)) { - Thread.sleep(1000); + Thread.sleep(200); LOG.info("Waiting for table to disable"); } admin.enableTable(TABLE_NAME); @@ -469,9 +470,11 @@ public class TestHFileOutputFormat { setupRandomGeneratorMapper(job); HFileOutputFormat.configureIncrementalLoad(job, table); FileOutputFormat.setOutputPath(job, outDir); - + + Assert.assertFalse( util.getTestFileSystem().exists(outDir)) ; + assertEquals(table.getRegionsInfo().size(), - job.getNumReduceTasks()); + job.getNumReduceTasks()); assertTrue(job.waitForCompletion(true)); } diff --git a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java index cf53671b90a..aa0092317b2 100644 --- a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java +++ b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java @@ -86,20 +86,6 @@ public class TestTableInputFormatScan { TEST_UTIL.shutdownMiniCluster(); } - @Before - public void setUp() throws Exception { - // nothing - } - - /** - * @throws java.lang.Exception - */ - @After - public void tearDown() throws Exception { - Configuration c = TEST_UTIL.getConfiguration(); - FileUtil.fullyDelete(new File(c.get("hadoop.tmp.dir"))); - } - /** * Pass the key and value to reduce. */ diff --git a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index 621ada01868..dfe036957f3 100644 --- a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -124,6 +124,7 @@ public class TestTimeRangeMapRed { for (Long ts : tsList) { Put put = new Put(key.get()); + put.setWriteToWAL(false); put.add(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true)); table.put(put); } @@ -163,6 +164,7 @@ public class TestTimeRangeMapRed { private void prepareTest(final HTable table) throws IOException { for (Map.Entry entry : TIMESTAMP.entrySet()) { Put put = new Put(KEY); + put.setWriteToWAL(false); put.add(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false)); table.put(put); } diff --git a/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java b/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java index 62b587ea629..a9e0ac24880 100644 --- a/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java +++ b/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java @@ -56,6 +56,7 @@ public class TsvImporterCustomTestMapper extends TsvImporterMapper { ImmutableBytesWritable rowKey = new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); Put put = new Put(rowKey.copyBytes()); + put.setWriteToWAL(false); //The value should look like this: VALUE1 or VALUE2. 
Let's multiply //the integer by 3 diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java index 01abc64b86f..2924323dbb1 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java @@ -489,6 +489,7 @@ public class TestMasterTransitions { // If start key, add 'aaa'. byte [] row = getStartKey(hri); Put p = new Put(row); + p.setWriteToWAL(false); p.add(getTestFamily(), getTestQualifier(), row); t.put(p); rows++; diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java b/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java index 2c5ed801a00..fd0300152d1 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java @@ -360,6 +360,7 @@ public class TestZKBasedOpenCloseRegion { // If start key, add 'aaa'. byte [] row = getStartKey(hri); Put p = new Put(row); + p.setWriteToWAL(false); p.add(getTestFamily(), getTestQualifier(), row); t.put(p); rows++; diff --git a/src/test/java/org/apache/hadoop/hbase/monitoring/TestMemoryBoundedLogMessageBuffer.java b/src/test/java/org/apache/hadoop/hbase/monitoring/TestMemoryBoundedLogMessageBuffer.java index bf3068f49c4..22cd0285daf 100644 --- a/src/test/java/org/apache/hadoop/hbase/monitoring/TestMemoryBoundedLogMessageBuffer.java +++ b/src/test/java/org/apache/hadoop/hbase/monitoring/TestMemoryBoundedLogMessageBuffer.java @@ -51,7 +51,6 @@ public class TestMemoryBoundedLogMessageBuffer { StringWriter sw = new StringWriter(); buf.dumpTo(new PrintWriter(sw)); String dump = sw.toString(); - System.out.println(dump); assertFalse("The early log messages should be evicted", dump.contains("hello 1\n")); assertTrue("The late log messages should be retained", diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index fda8cf56ae6..e3eb0099127 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -122,6 +122,7 @@ public class TestBlocksRead extends HBaseTestCase { long versionEnd) throws IOException { byte columnBytes[] = Bytes.toBytes(col); Put put = new Put(Bytes.toBytes(row)); + put.setWriteToWAL(false); for (long version = versionStart; version <= versionEnd; version++) { put.add(cf, columnBytes, version, genValue(row, col, version)); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index 1cf32cce9ae..d2945fa8d5a 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -101,6 +101,7 @@ public class TestColumnSeeking { for (String value : values) { for (String row : rows) { Put p = new Put(Bytes.toBytes(row)); + p.setWriteToWAL(false); for (String column : allColumns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { KeyValue kv = @@ -205,6 +206,7 @@ public class TestColumnSeeking { for (String row : rows) { Put p = new Put(Bytes.toBytes(row)); + p.setWriteToWAL(false); for (String column : allColumns) { for (long timestamp = 1; timestamp <= 
maxTimestamp; timestamp++) { KeyValue kv = diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index 0479453c2ef..35b8a6c8547 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -394,6 +394,7 @@ public class TestCompaction extends HBaseTestCase { for (int i = 0; i < compactionThreshold; i++) { HRegionIncommon loader = new HRegionIncommon(r); Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i))); + p.setWriteToWAL(false); for (int j = 0; j < jmax; j++) { p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad); } diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index a22e83ceada..0f198152522 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -78,6 +78,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i), i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval)); Put put = new Put(hri.getRegionName()); + put.setWriteToWAL(false); put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(hri)); mr.put(put, false); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 0cb3115c3c6..9aa2b4aed49 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -319,6 +319,7 @@ public class TestHRegion extends HBaseTestCase { System.out.println(String.format("Saving row: %s, with value %s", row, value)); Put put = new Put(Bytes.toBytes(row)); + put.setWriteToWAL(false); put.add(Bytes.toBytes("trans-blob"), null, Bytes.toBytes("value for blob")); put.add(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement")); @@ -2452,6 +2453,7 @@ public class TestHRegion extends HBaseTestCase { boolean toggle=true; for (long i = 0; i < numRows; i++) { Put put = new Put(Bytes.toBytes(i)); + put.setWriteToWAL(false); put.add(family, qual1, Bytes.toBytes(i % 10)); region.put(put); @@ -2662,6 +2664,7 @@ public class TestHRegion extends HBaseTestCase { for (int r = 0; r < numRows; r++) { byte[] row = Bytes.toBytes("row" + r); Put put = new Put(row); + put.setWriteToWAL(false); byte[] value = Bytes.toBytes(String.valueOf(numPutsFinished)); for (byte[] family : families) { for (byte[] qualifier : qualifiers) { @@ -2879,6 +2882,7 @@ public class TestHRegion extends HBaseTestCase { for (int i = 0; i < duplicate_multiplier; i ++) { for (int j = 0; j < num_unique_rows; j++) { Put put = new Put(Bytes.toBytes("row" + j)); + put.setWriteToWAL(false); put.add(fam1, qf1, version++, val1); region.put(put); } @@ -2932,6 +2936,7 @@ public class TestHRegion extends HBaseTestCase { byte row[] = Bytes.toBytes("row:" + 0); byte column[] = Bytes.toBytes("column:" + 0); Put put = new Put(row); + put.setWriteToWAL(false); for (long idx = 1; idx <= 4; idx++) { put.add(FAMILY, column, idx, Bytes.toBytes("value-version-" + idx)); } @@ -3057,6 +3062,7 @@ public class TestHRegion extends HBaseTestCase { throws IOException { for(int i=startRow; i 50MB expected // should 
be reasonable for unit test and also cover wraparound @@ -58,12 +60,21 @@ public class TestMemStoreLAB { if (alloc.getData() != lastBuffer) { expectedOff = 0; lastBuffer = alloc.getData(); + slabsUsed++; } assertEquals(expectedOff, alloc.getOffset()); assertTrue("Allocation " + alloc + " overruns buffer", alloc.getOffset() + size <= alloc.getData().length); expectedOff += size; } + + // maximum waste is 1KB per slab plus + // whatever's left in current slab + long expectedWaste = slabsUsed * 1000 + + (lastBuffer.length - expectedOff); + long waste = mslab.getWastedBytes(); + assertTrue("waste should be less than " + expectedWaste + + " but was: " + waste, waste < expectedWaste); } @Test diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index b8cf95d3b74..3d9ddbff29d 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -229,9 +229,11 @@ public class TestSeekOptimizations { + columnRestrictionStr + ", " + rowRestrictionStr + ", maxVersions=" + maxVersions + ", lazySeek=" + lazySeekEnabled; long seekCount = StoreFileScanner.getSeekCount() - initialSeekCount; - System.err.println("Seek count: " + seekCount + ", KVs returned: " + if (VERBOSE) { + System.err.println("Seek count: " + seekCount + ", KVs returned: " + actualKVs.size() + ". " + testDesc + (lazySeekEnabled ? "\n" : "")); + } if (lazySeekEnabled) { totalSeekLazy += seekCount; } else { diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index c0a8fea0936..d39a231ab2e 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -81,6 +81,7 @@ public class TestWideScanner extends HBaseTestCase { byte[] b = Bytes.toBytes(String.format("%10d", i)); for (j = 0; j < 100; j++) { Put put = new Put(row); + put.setWriteToWAL(false); put.add(COLUMNS[rng.nextInt(COLUMNS.length)], b, ++ts, b); region.put(put); count++; diff --git a/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java index d3394c0b19f..a0bd6236497 100644 --- a/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java +++ b/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java @@ -48,7 +48,6 @@ import org.junit.Test; public class TestReplicationSink { private static final Log LOG = LogFactory.getLog(TestReplicationSink.class); private static final int BATCH_SIZE = 10; - private static final long SLEEP_TIME = 500; private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -112,7 +111,6 @@ public class TestReplicationSink { public void setUp() throws Exception { table1 = TEST_UTIL.truncateTable(TABLE_NAME1); table2 = TEST_UTIL.truncateTable(TABLE_NAME2); - Thread.sleep(SLEEP_TIME); } /** diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java index cd9d3e30481..3ce7f877e50 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java @@ -59,7 +59,7 @@ public 
class TestGzipFilter { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index ea10e69abae..12f99bf7520 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -71,7 +71,7 @@ public class TestMultiRowResource { @BeforeClass public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); context = JAXBContext.newInstance( CellModel.class, diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java index 4b9c9bd85ca..721aba41e19 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java @@ -88,6 +88,7 @@ public class TestScannerResource { k[1] = b2; k[2] = b3; Put put = new Put(k); + put.setWriteToWAL(false); put.add(famAndQf[0], famAndQf[1], k); table.put(put); count++; @@ -149,7 +150,7 @@ public class TestScannerResource { @BeforeClass public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 1a539adf4a8..6e13f3bcacc 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -139,6 +139,7 @@ public class TestScannersWithFilters { // Insert first half for(byte [] ROW : ROWS_ONE) { Put p = new Put(ROW); + p.setWriteToWAL(false); for(byte [] QUALIFIER : QUALIFIERS_ONE) { p.add(FAMILIES[0], QUALIFIER, VALUES[0]); } @@ -146,6 +147,7 @@ public class TestScannersWithFilters { } for(byte [] ROW : ROWS_TWO) { Put p = new Put(ROW); + p.setWriteToWAL(false); for(byte [] QUALIFIER : QUALIFIERS_TWO) { p.add(FAMILIES[1], QUALIFIER, VALUES[1]); } @@ -155,6 +157,7 @@ public class TestScannersWithFilters { // Insert second half (reverse families) for(byte [] ROW : ROWS_ONE) { Put p = new Put(ROW); + p.setWriteToWAL(false); for(byte [] QUALIFIER : QUALIFIERS_ONE) { p.add(FAMILIES[1], QUALIFIER, VALUES[0]); } @@ -162,6 +165,7 @@ public class TestScannersWithFilters { } for(byte [] ROW : ROWS_TWO) { Put p = new Put(ROW); + p.setWriteToWAL(false); for(byte [] QUALIFIER : QUALIFIERS_TWO) { p.add(FAMILIES[0], QUALIFIER, VALUES[1]); } diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java index 14d24bab53f..749b8db0d15 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java @@ -57,7 +57,7 @@ public class TestSchemaResource { 
@BeforeClass public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index 6933f781ee9..c5c81035458 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -75,7 +75,7 @@ public class TestStatusResource { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index c55cb187e44..83cb3255486 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -97,6 +97,7 @@ public class TestTableResource { k[1] = b2; k[2] = b3; Put put = new Put(k); + put.setWriteToWAL(false); put.add(famAndQf[0], famAndQf[1], k); table.put(put); } @@ -109,13 +110,18 @@ public class TestTableResource { // tell the master to split the table admin.split(TABLE); // give some time for the split to happen - try { - Thread.sleep(15 * 1000); - } catch (InterruptedException e) { - LOG.warn(StringUtils.stringifyException(e)); + + long timeout = System.currentTimeMillis() + (15 * 1000); + while (System.currentTimeMillis() < timeout && m.size()!=2){ + try { + Thread.sleep(250); + } catch (InterruptedException e) { + LOG.warn(StringUtils.stringifyException(e)); + } + // check again + m = table.getRegionsInfo(); } - // check again - m = table.getRegionsInfo(); + // should have two regions now assertEquals(m.size(), 2); regionMap = m; diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestTransform.java b/src/test/java/org/apache/hadoop/hbase/rest/TestTransform.java index a65a9245a0b..d556de48df1 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestTransform.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestTransform.java @@ -55,7 +55,7 @@ public class TestTransform { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); diff --git a/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java b/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java index f9fb489eb36..7ba0b9fe156 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java @@ -54,7 +54,7 @@ public class TestVersionResource { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); diff --git 
a/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdmin.java b/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdmin.java index 38295ceeeaf..507e86a081e 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdmin.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdmin.java @@ -34,43 +34,25 @@ import org.junit.BeforeClass; import org.junit.Test; public class TestRemoteAdmin { - private static final String TABLE_1 = "TestRemoteAdmin_Table_1"; - private static final String TABLE_2 = "TestRemoteAdmin_Table_2"; private static final byte[] COLUMN_1 = Bytes.toBytes("a"); - - static final HTableDescriptor DESC_1; - static { - DESC_1 = new HTableDescriptor(TABLE_1); - DESC_1.addFamily(new HColumnDescriptor(COLUMN_1)); - } - static final HTableDescriptor DESC_2; - static { - DESC_2 = new HTableDescriptor(TABLE_2); - DESC_2.addFamily(new HColumnDescriptor(COLUMN_1)); - } - - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + static final HTableDescriptor DESC_1 = new HTableDescriptor(TABLE_1); + private static final HBaseTestingUtility TEST_UTIL = + new HBaseTestingUtility(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); - private static HBaseAdmin localAdmin; private static RemoteAdmin remoteAdmin; @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(3); + DESC_1.addFamily(new HColumnDescriptor(COLUMN_1)); + + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - localAdmin = TEST_UTIL.getHBaseAdmin(); + remoteAdmin = new RemoteAdmin(new Client( new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), TEST_UTIL.getConfiguration()); - if (localAdmin.tableExists(TABLE_1)) { - localAdmin.disableTable(TABLE_1); - localAdmin.deleteTable(TABLE_1); - } - if (!localAdmin.tableExists(TABLE_2)) { - localAdmin.createTable(DESC_2); - } } @AfterClass @@ -80,16 +62,11 @@ public class TestRemoteAdmin { } @Test - public void testCreateTable() throws Exception { + public void testCreateAnDeleteTable() throws Exception { assertFalse(remoteAdmin.isTableAvailable(TABLE_1)); remoteAdmin.createTable(DESC_1); assertTrue(remoteAdmin.isTableAvailable(TABLE_1)); - } - - @Test - public void testDeleteTable() throws Exception { - assertTrue(remoteAdmin.isTableAvailable(TABLE_2)); - remoteAdmin.deleteTable(TABLE_2); - assertFalse(remoteAdmin.isTableAvailable(TABLE_2)); + remoteAdmin.deleteTable(TABLE_1); + assertFalse(remoteAdmin.isTableAvailable(TABLE_1)); } } diff --git a/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java index 4c6cf9922c6..bdeb77fe3ad 100644 --- a/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java +++ b/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -75,7 +75,7 @@ public class TestRemoteTable { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); LOG.info("Admin Connection=" + admin.getConnection() + ", " + diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index ca6dd4bc72b..93a459038e9 100644 --- 
a/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -122,6 +122,7 @@ public class TestHBaseFsck { // When we find a diff RS, change the assignment and break if (startCode != sn.getStartcode()) { Put put = new Put(res.getRow()); + put.setWriteToWAL(false); put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(sn.getHostAndPort())); put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, @@ -135,13 +136,12 @@ public class TestHBaseFsck { // Try to fix the data assertErrors(doFsck(true), new ERROR_CODE[]{ ERROR_CODE.SERVER_DOES_NOT_MATCH_META}); - Thread.sleep(15000); // Should be fixed now assertNoErrors(doFsck(false)); // comment needed - what is the purpose of this line - new HTable(conf, Bytes.toBytes(table)).getScanner(new Scan());; + new HTable(conf, Bytes.toBytes(table)).getScanner(new Scan()); } private HRegionInfo createRegion(Configuration conf, final HTableDescriptor diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java b/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java index a55c2128126..b076abbd11b 100644 --- a/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java +++ b/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java @@ -41,7 +41,7 @@ public class TestIdLock { private static final int NUM_IDS = 16; private static final int NUM_THREADS = 128; - private static final int NUM_SECONDS = 20; + private static final int NUM_SECONDS = 15; private IdLock idLock = new IdLock(); @@ -63,7 +63,6 @@ public class TestIdLock { while (System.currentTimeMillis() < endTime) { long id = rand.nextInt(NUM_IDS); - LOG.info(clientId + " is waiting for id " + id); IdLock.Entry lockEntry = idLock.getLockEntry(id); try { int sleepMs = 1 + rand.nextInt(4); @@ -75,10 +74,7 @@ public class TestIdLock { } idOwner.put(id, clientId); - LOG.info(clientId + " took id " + id + ", sleeping for " + - sleepMs + "ms"); Thread.sleep(sleepMs); - LOG.info(clientId + " is releasing id " + id); idOwner.remove(id); } finally { diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java b/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java index ffc8d9d699c..21a57caa4d0 100644 --- a/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java +++ b/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java @@ -143,6 +143,7 @@ public class TestMergeTable { LOG.info("Created region " + region.getRegionNameAsString()); for(int i = firstRow; i < firstRow + nrows; i++) { Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i))); + put.setWriteToWAL(false); put.add(COLUMN_NAME, null, VALUE); region.put(put); if (i % 10000 == 0) { diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java index 88dc9de28a8..e4b3d75bcff 100644 --- a/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java +++ b/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java @@ -27,12 +27,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseTestCase; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.*; import 
org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -46,6 +41,7 @@ import org.apache.hadoop.util.ToolRunner; /** Test stand alone merge tool that can merge arbitrary regions */ public class TestMergeTool extends HBaseTestCase { static final Log LOG = LogFactory.getLog(TestMergeTool.class); + HBaseTestingUtility TEST_UTIL; // static final byte [] COLUMN_NAME = Bytes.toBytes("contents:"); static final byte [] FAMILY = Bytes.toBytes("contents"); static final byte [] QUALIFIER = Bytes.toBytes("dc"); @@ -123,7 +119,8 @@ public class TestMergeTool extends HBaseTestCase { "row_1000", "row_1000", "row_1000", "row_1000" }); // Start up dfs - this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null); + TEST_UTIL = new HBaseTestingUtility(conf); + this.dfsCluster = TEST_UTIL.startMiniDFSCluster(2); this.fs = this.dfsCluster.getFileSystem(); System.out.println("fs=" + this.fs); this.conf.set("fs.defaultFS", fs.getUri().toString()); @@ -162,7 +159,7 @@ public class TestMergeTool extends HBaseTestCase { closeRootAndMeta(); } catch (Exception e) { - shutdownDfs(dfsCluster); + TEST_UTIL.shutdownMiniCluster(); throw e; } } @@ -170,7 +167,7 @@ public class TestMergeTool extends HBaseTestCase { @Override public void tearDown() throws Exception { super.tearDown(); - shutdownDfs(dfsCluster); + TEST_UTIL.shutdownMiniCluster(); } /* diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java b/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java index 12e8ba12e41..8479d9b6c30 100644 --- a/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java +++ b/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java @@ -287,6 +287,7 @@ public class TestRegionSplitter { for(byte b=Byte.MIN_VALUE; b
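
The test-side edits above repeat one pattern: a long fixed Thread.sleep is replaced by a short poll inside a bounded loop (for example, TestTableResource's blind 15-second sleep after the split becomes a 250 ms poll of table.getRegionsInfo() against a 15-second deadline). Here is a hypothetical helper distilling the shape these inlined loops share; nothing in the patch defines such a method, and HBaseTestingUtility in this version has no direct equivalent:

    import java.util.concurrent.Callable;

    public final class PollingWait {
        private PollingWait() {}

        /**
         * Re-checks condition every intervalMs until it holds or
         * timeoutMs elapses; returns whether the condition was met.
         */
        public static boolean waitFor(long timeoutMs, long intervalMs,
                Callable<Boolean> condition) throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (!condition.call()) {
                if (System.currentTimeMillis() >= deadline) {
                    return false;
                }
                Thread.sleep(intervalMs);
            }
            return true;
        }
    }

Polling keeps the worst case bounded while letting the common case return in milliseconds, which is presumably where most of the test-runtime savings tracked by HBASE-4703 come from.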