HBASE-19964 Addendum retry on major compaction

zhangduo 2018-02-10 17:43:08 +08:00
parent 3f127063ff
commit b001b1849a
1 changed file with 10 additions and 17 deletions


@@ -21,12 +21,12 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -59,30 +59,23 @@ public class TestWriteHeavyIncrementObserver extends WriteHeavyIncrementObserver
     // we do not hack scan operation so using scan we could get the original values added into the
     // table.
     try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW)
       .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
       Result r = scanner.next();
       assertTrue(r.rawCells().length > 2);
     }
     UTIL.flush(NAME);
-    UTIL.getAdmin().majorCompact(NAME);
-    HStore store = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0).getStore(FAMILY);
-    Waiter.waitFor(UTIL.getConfiguration(), 30000, new Waiter.ExplainingPredicate<Exception>() {
-
-      @Override
-      public boolean evaluate() throws Exception {
-        return store.getStorefilesCount() == 1;
-      }
-
-      @Override
-      public String explainFailure() throws Exception {
-        return "Major compaction hangs, there are still " + store.getStorefilesCount() +
-          " store files";
-      }
-    });
+    HRegion region = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0);
+    HStore store = region.getStore(FAMILY);
+    for (;;) {
+      region.compact(true);
+      if (store.getStorefilesCount() == 1) {
+        break;
+      }
+    }
     assertSum();
     // Should only have two cells after flush and major compaction
     try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW)
       .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
       Result r = scanner.next();
       assertEquals(2, r.rawCells().length);
     }