From b001b1849a0f6ab5fe9137fb32c54f6e26089661 Mon Sep 17 00:00:00 2001
From: zhangduo
Date: Sat, 10 Feb 2018 17:43:08 +0800
Subject: [PATCH] HBASE-19964 Addendum retry on major compaction

---
 .../TestWriteHeavyIncrementObserver.java | 27 +++++++------------
 1 file changed, 10 insertions(+), 17 deletions(-)

diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestWriteHeavyIncrementObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestWriteHeavyIncrementObserver.java
index 7ef1558d610..639461bc8af 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestWriteHeavyIncrementObserver.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestWriteHeavyIncrementObserver.java
@@ -21,12 +21,12 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -59,30 +59,23 @@ public class TestWriteHeavyIncrementObserver extends WriteHeavyIncrementObserver
     // we do not hack scan operation so using scan we could get the original values added into the
     // table.
     try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW)
-        .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
+      .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
       Result r = scanner.next();
       assertTrue(r.rawCells().length > 2);
     }
     UTIL.flush(NAME);
-    UTIL.getAdmin().majorCompact(NAME);
-    HStore store = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0).getStore(FAMILY);
-    Waiter.waitFor(UTIL.getConfiguration(), 30000, new Waiter.ExplainingPredicate<Exception>() {
-
-      @Override
-      public boolean evaluate() throws Exception {
-        return store.getStorefilesCount() == 1;
+    HRegion region = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0);
+    HStore store = region.getStore(FAMILY);
+    for (;;) {
+      region.compact(true);
+      if (store.getStorefilesCount() == 1) {
+        break;
       }
-
-      @Override
-      public String explainFailure() throws Exception {
-        return "Major compaction hangs, there are still " + store.getStorefilesCount() +
-          " store files";
-      }
-    });
+    }
     assertSum();
     // Should only have two cells after flush and major compaction
     try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW)
-        .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
+      .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
       Result r = scanner.next();
       assertEquals(2, r.rawCells().length);
     }
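
For reference, the retry introduced above boils down to the pattern sketched below. This is only an
illustration, not part of the patch: the helper name, the retry bound, and the sleep between attempts
are assumptions, while UTIL, NAME, FAMILY, HRegion and HStore are the fields and imports already used
by the test.

  // Hypothetical helper for the test class; relies on the HRegion/HStore imports added above.
  private void majorCompactUntilOneStoreFile() throws Exception {
    HRegion region = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0);
    HStore store = region.getStore(FAMILY);
    // A single HRegion.compact(true) call is not guaranteed to leave exactly one store file,
    // so keep requesting a major compaction until the store reports a single file, as the
    // patch's for (;;) loop does.
    for (int attempts = 0; attempts < 30 && store.getStorefilesCount() != 1; attempts++) {
      region.compact(true);
      Thread.sleep(100); // brief pause between attempts (assumption, not in the patch)
    }
    assertEquals(1, store.getStorefilesCount());
  }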