HBASE-8870 Store.needsCompaction() should include minFilesToCompact (Liang Xie)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1526320 13f79535-47bb-0310-9956-ffa450edef68
sershe 2013-09-25 22:17:35 +00:00
parent 22b77dc791
commit a21deb20f1
4 changed files with 40 additions and 46 deletions

RatioBasedCompactionPolicy.java

@@ -392,6 +392,6 @@ public class RatioBasedCompactionPolicy extends CompactionPolicy {
   public boolean needsCompaction(final Collection<StoreFile> storeFiles,
       final List<StoreFile> filesCompacting) {
     int numCandidates = storeFiles.size() - filesCompacting.size();
-    return numCandidates > comConf.getMinFilesToCompact();
+    return numCandidates >= comConf.getMinFilesToCompact();
   }
 }
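
The fix is a single character, but it matters exactly at the boundary: with hbase.hstore.compaction.min (minFilesToCompact) at its default of 3, a store holding exactly 3 eligible files was never reported as needing compaction under the old strict '>' check. A minimal standalone sketch of that boundary, with the config value hard-coded as an assumption:

    public class NeedsCompactionBoundary {
      // Assumed default of hbase.hstore.compaction.min (minFilesToCompact).
      static final int MIN_FILES_TO_COMPACT = 3;

      // Before the patch: strict greater-than misses the exact-minimum case.
      static boolean needsCompactionOld(int storeFiles, int filesCompacting) {
        return storeFiles - filesCompacting > MIN_FILES_TO_COMPACT;
      }

      // After the patch: greater-or-equal agrees with the selection logic.
      static boolean needsCompactionNew(int storeFiles, int filesCompacting) {
        return storeFiles - filesCompacting >= MIN_FILES_TO_COMPACT;
      }

      public static void main(String[] args) {
        System.out.println(needsCompactionOld(3, 0)); // false: the bug
        System.out.println(needsCompactionNew(3, 0)); // true: fixed
      }
    }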

HBaseTestingUtility.java

@@ -1165,6 +1165,30 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return new HTable(getConfiguration(), tableName);
   }
 
+  /**
+   * Create a table.
+   * @param htd
+   * @param families
+   * @param c Configuration to use
+   * @return An HTable instance for the created table.
+   * @throws IOException
+   */
+  public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
+  throws IOException {
+    for(byte[] family : families) {
+      HColumnDescriptor hcd = new HColumnDescriptor(family);
+      // Disable blooms (they are on by default as of 0.95) but we disable them here because
+      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
+      // on is interfering.
+      hcd.setBloomFilterType(BloomType.NONE);
+      htd.addFamily(hcd);
+    }
+    getHBaseAdmin().createTable(htd);
+    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
+    waitUntilAllRegionsAssigned(htd.getTableName());
+    return new HTable(c, htd.getTableName());
+  }
+
   /**
    * Create a table.
    * @param tableName
@@ -1176,19 +1200,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   public HTable createTable(TableName tableName, byte[][] families,
       final Configuration c)
   throws IOException {
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    for(byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family);
-      // Disable blooms (they are on by default as of 0.95) but we disable them here because
-      // tests have hard coded counts of what to expect in block cache, etc., and blooms being
-      // on is interfering.
-      hcd.setBloomFilterType(BloomType.NONE);
-      desc.addFamily(hcd);
-    }
-    getHBaseAdmin().createTable(desc);
-    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
-    waitUntilAllRegionsAssigned(tableName);
-    return new HTable(c, tableName);
+    return createTable(new HTableDescriptor(tableName), families, c);
   }
 
   /**
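
The refactor above also adds an overload that accepts a pre-built HTableDescriptor, which the TestSnapshotFromMaster change further down uses to create a table with compactions turned off. A short sketch of that call site, assuming a running mini cluster (UTIL) and the test's TABLE_NAME / TEST_FAM fixtures:

    // Sketch of using the new overload; UTIL, TABLE_NAME and TEST_FAM are
    // assumed test fixtures (an HBaseTestingUtility plus table/family names).
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    htd.setCompactionEnabled(false); // any descriptor tweak the test needs
    HTable table = UTIL.createTable(htd, new byte[][] { TEST_FAM },
        UTIL.getConfiguration());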

TestRegionObserverScannerOpenHook.java

@@ -55,9 +55,7 @@ import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -249,14 +247,8 @@ public class TestRegionObserverScannerOpenHook {
     admin.flush(region.getRegionName());
 
     // run a compaction, which normally would should get rid of the data
-    Store s = region.getStores().get(A);
-    CountDownLatch latch = new CountDownLatch(1);
-    WaitableCompactionRequest request = new WaitableCompactionRequest(s.getStorefiles(), latch);
-    rs.compactSplitThread.requestCompaction(region, s,
-        "compact for testRegionObserverCompactionTimeStacking", Store.PRIORITY_USER, request);
-    // wait for the compaction to complete
-    latch.await();
-
+    // wait for the compaction checker to complete
+    Thread.sleep(1000);
     // check both rows to ensure that they aren't there
     Get get = new Get(ROW);
     Result r = table.get(get);
@@ -273,26 +265,4 @@ public class TestRegionObserverScannerOpenHook {
     table.close();
     UTIL.shutdownMiniCluster();
   }
-
-  /**
-   * A simple compaction on which you can wait for the passed in latch until the compaction finishes
-   * (either successfully or if it failed).
-   */
-  public static class WaitableCompactionRequest extends CompactionRequest {
-    private CountDownLatch done;
-
-    /**
-     * Constructor for a custom compaction. Uses the setXXX methods to update the state of the
-     * compaction before being used.
-     */
-    public WaitableCompactionRequest(Collection<StoreFile> files, CountDownLatch finished) {
-      super(files);
-      this.done = finished;
-    }
-
-    @Override
-    public void afterExecute() {
-      this.done.countDown();
-    }
-  }
 }
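
Dropping WaitableCompactionRequest leaves the test waiting on a fixed one-second sleep for the periodic compaction checker, which is simpler but time-sensitive. If that ever proves flaky, a test could poll for the post-compaction state instead; a hypothetical helper along these lines (not part of this commit):

    import java.util.concurrent.Callable;

    // Hypothetical polling helper, not part of this commit: retry a condition
    // until it holds or a deadline passes, instead of one long fixed sleep.
    public final class PollingWait {
      private PollingWait() {}

      public static void waitFor(long timeoutMs, Callable<Boolean> condition)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (condition.call()) {
            return;
          }
          Thread.sleep(100); // short poll interval
        }
        throw new AssertionError("condition not met within " + timeoutMs + " ms");
      }
    }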

TestSnapshotFromMaster.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -280,11 +281,19 @@ public class TestSnapshotFromMaster {
     HBaseAdmin admin = UTIL.getHBaseAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
+    // recreate test table with disabled compactions; otherwise compaction may happen before
+    // snapshot, the call after snapshot will be a no-op and checks will fail
+    UTIL.deleteTable(TABLE_NAME);
+    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
+    htd.setCompactionEnabled(false);
+    UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
+
     // load the table (creates 4 hfiles)
     UTIL.loadTable(new HTable(UTIL.getConfiguration(), TABLE_NAME), TEST_FAM);
 
     // disable the table so we can take a snapshot
     admin.disableTable(TABLE_NAME);
+    htd.setCompactionEnabled(true);
 
     // take a snapshot of the table
     String snapshotName = "snapshot";
@@ -298,6 +307,9 @@ public class TestSnapshotFromMaster {
     // ensure we only have one snapshot
     SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshotNameBytes, TABLE_NAME);
 
+    // enable compactions now
+    admin.modifyTable(TABLE_NAME, htd);
+
     // renable the table so we can compact the regions
     admin.enableTable(TABLE_NAME);
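
Taken together, the snapshot test now brackets the snapshot with a compaction toggle. A condensed sketch of the pattern, reusing the test's own names (this summarizes the hunks above rather than adding new commit code):

    // Recreate the table with compactions off so the 4 hfiles survive the snapshot.
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    htd.setCompactionEnabled(false);
    UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
    // ... load data, disable table, take and verify the snapshot ...
    htd.setCompactionEnabled(true);
    admin.modifyTable(TABLE_NAME, htd); // hand compactions back to the cluster
    admin.enableTable(TABLE_NAME);      // then compact the regions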