HBASE-10371 Compaction creates an empty hfile, then selects this file for compaction and creates an empty hfile, over and over again

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1559247 13f79535-47bb-0310-9956-ffa450edef68
Zhihong Yu 2014-01-17 20:49:19 +00:00
parent e7c36e7822
commit 4381308ac8
3 changed files with 52 additions and 3 deletions


@@ -158,7 +158,11 @@ public class RatioBasedCompactionPolicy extends CompactionPolicy {
        expiredStoreFiles.add(storeFile);
      }
    }
    if (expiredStoreFiles != null && expiredStoreFiles.size() == 1
        && expiredStoreFiles.get(0).getReader().getEntries() == 0) {
      // If just one empty store file, do not select for compaction.
      return null;
    }
    return expiredStoreFiles;
  }
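
For context, the guard above is the whole fix: a TTL-driven compaction of a fully expired store file writes an empty hfile, the empty hfile still looks expired, so the policy selects it again and compacts forever. Below is a minimal, self-contained sketch of the guard's behavior in isolation; EmptyHFileGuardSketch, ExpiredFile, and filterExpired are hypothetical stand-ins for StoreFile and the policy code above, not real HBase API.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class EmptyHFileGuardSketch {

  // Hypothetical stand-in for StoreFile#getReader().getEntries().
  interface ExpiredFile {
    long getEntries();
  }

  // Mirrors the guard added above: a lone, empty expired file is reported
  // as "nothing to compact", which breaks the select/compact/select loop.
  static List<ExpiredFile> filterExpired(List<ExpiredFile> expired) {
    if (expired != null && expired.size() == 1
        && expired.get(0).getEntries() == 0) {
      return null;
    }
    return expired;
  }

  public static void main(String[] args) {
    ExpiredFile empty = () -> 0L;
    ExpiredFile full = () -> 42L;
    // true: a single empty expired file is skipped.
    System.out.println(filterExpired(Collections.singletonList(empty)) == null);
    // false: a single non-empty expired file is still a candidate.
    System.out.println(filterExpired(Collections.singletonList(full)) == null);
    // false: multiple expired files are still candidates.
    System.out.println(filterExpired(Arrays.asList(empty, full)) == null);
  }
}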


@@ -36,6 +36,8 @@ public class MockStoreFile extends StoreFile {
  long sequenceid;
  private Map<byte[], byte[]> metadata = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
  byte[] splitPoint = null;
  TimeRangeTracker timeRangeTracker;
  long entryCount;

  MockStoreFile(HBaseTestingUtility testUtil, Path testPath,
      long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
@@ -85,14 +87,34 @@ public class MockStoreFile extends StoreFile {
    this.metadata.put(key, value);
  }

  void setTimeRangeTracker(TimeRangeTracker timeRangeTracker) {
    this.timeRangeTracker = timeRangeTracker;
  }

  void setEntries(long entryCount) {
    this.entryCount = entryCount;
  }

  @Override
  public StoreFile.Reader getReader() {
    final long len = this.length;
    final TimeRangeTracker timeRange = this.timeRangeTracker;
    final long entries = this.entryCount;
    return new StoreFile.Reader() {
      @Override
      public long length() {
        return len;
      }

      @Override
      public long getMaxTimestamp() {
        return timeRange == null ? Long.MAX_VALUE : timeRange.maximumTimestamp;
      }

      @Override
      public long getEntries() {
        return entries;
      }
    };
  }
}
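
Note how the overridden getReader() snapshots length, timeRangeTracker, and entryCount into finals before building the anonymous Reader, so the reader reflects whatever the test configured via setTimeRangeTracker(...) and setEntries(...). A mock with no tracker reports Long.MAX_VALUE as its maximum timestamp and therefore never looks expired to the TTL check.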


@@ -36,8 +36,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -304,4 +302,29 @@ public class TestDefaultCompactSelection extends TestCase {
    // Prefer later compaction if the benefit is significant.
    compactEquals(sfCreate(99,99,99,99, 27,27,27,20,20,20), 20, 20, 20);
  }
  public void testCompactionEmptyHFile() throws IOException {
    // Set a TTL on the store's ScanInfo so expired-file selection applies.
    ScanInfo oldScanInfo = store.getScanInfo();
    ScanInfo newScanInfo = new ScanInfo(oldScanInfo.getFamily(),
        oldScanInfo.getMinVersions(), oldScanInfo.getMaxVersions(), 600,
        oldScanInfo.getKeepDeletedCells(), oldScanInfo.getTimeToPurgeDeletes(),
        oldScanInfo.getComparator());
    store.setScanInfo(newScanInfo);
    // A single empty store file should not be selected for compaction.
    List<StoreFile> candidates = sfCreate(0);
    for (StoreFile file : candidates) {
      if (file instanceof MockStoreFile) {
        MockStoreFile mockFile = (MockStoreFile) file;
        mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
        mockFile.setEntries(0);
      }
    }
    // Run default compaction selection.
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
        .getCompactionPolicy()).selectCompaction(candidates,
        new ArrayList<StoreFile>(), false, false, false);
    assertTrue(result.getFiles().size() == 0);
    store.setScanInfo(oldScanInfo);
  }
}
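
Reading the test wiring: TimeRangeTracker(-1, -1) gives the single mock file a maximum timestamp of -1, so it always falls below the TTL cutoff configured above and is treated as expired, while setEntries(0) makes it the empty hfile from the bug report. Before this patch, that lone expired-but-empty file would be selected, compacted into yet another empty hfile, and selected again; with the guard in RatioBasedCompactionPolicy, selectCompaction returns a request with no files, which is exactly what the final assertTrue verifies.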