HBASE-7902 deletes may be removed during minor compaction, in non-standard compaction schemes

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1451314 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
sershe 2013-02-28 19:46:24 +00:00
parent 4ffa036338
commit 9b1ec93ee3
11 changed files with 16 additions and 20 deletions

View File

@@ -201,7 +201,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
Scan scan = new Scan();
scan.setMaxVersions(scanInfo.getMaxVersions());
return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
ScanType.MINOR_COMPACT, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
@Override

View File

@@ -817,7 +817,7 @@ public class HStore implements Store {
Scan scan = new Scan();
scan.setMaxVersions(scanInfo.getMaxVersions());
scanner = new StoreScanner(this, scanInfo, scan,
Collections.singletonList(memstoreScanner), ScanType.MINOR_COMPACT,
Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
this.region.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
if (this.region.getCoprocessorHost() != null) {

View File

@@ -155,7 +155,7 @@ public class ScanQueryMatcher {
// keep deleted cells: if compaction or raw scan
this.keepDeletedCells = (scanInfo.getKeepDeletedCells() && !isUserScan) || scan.isRaw();
// retain deletes: if minor compaction or raw scan
this.retainDeletesInOutput = scanType == ScanType.MINOR_COMPACT || scan.isRaw();
this.retainDeletesInOutput = scanType == ScanType.COMPACT_RETAIN_DELETES || scan.isRaw();
// seePastDeleteMarker: user initiated scans
this.seePastDeleteMarkers = scanInfo.getKeepDeletedCells() && isUserScan;

View File

@@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
*/
@InterfaceAudience.Private
public enum ScanType {
MAJOR_COMPACT,
MINOR_COMPACT,
COMPACT_DROP_DELETES,
COMPACT_RETAIN_DELETES,
USER_SCAN
}

View File

@@ -125,9 +125,7 @@ public class DefaultCompactionPolicy extends CompactionPolicy {
}
candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, majorCompaction);
CompactionRequest result = new CompactionRequest(candidateSelection);
if (!majorCompaction && !candidateSelection.isEmpty()) {
result.setOffPeak(mayUseOffPeak);
}
result.setOffPeak(!candidateSelection.isEmpty() && !majorCompaction && mayUseOffPeak);
return result;
}

View File

@@ -71,6 +71,8 @@ public class DefaultCompactor extends Compactor {
// Also calculate earliest put timestamp if major compaction
int maxKeyCount = 0;
long earliestPutTs = HConstants.LATEST_TIMESTAMP;
ScanType scanType = request.isMajor()
? ScanType.COMPACT_DROP_DELETES : ScanType.COMPACT_RETAIN_DELETES;
for (StoreFile file: filesToCompact) {
StoreFile.Reader r = file.getReader();
if (r == null) {
@@ -85,7 +87,7 @@ public class DefaultCompactor extends Compactor {
// For major compactions calculate the earliest put timestamp of all
// involved storefiles. This is used to remove family delete marker during
// compaction.
if (majorCompaction) {
if (scanType == ScanType.COMPACT_DROP_DELETES) {
byte [] tmp = r.loadFileInfo().get(StoreFile.EARLIEST_PUT_TS);
if (tmp == null) {
// There's a file with no information, must be an old one
@@ -131,13 +133,9 @@ public class DefaultCompactor extends Compactor {
InternalScanner scanner = null;
try {
if (store.getCoprocessorHost() != null) {
scanner = store
.getCoprocessorHost()
.preCompactScannerOpen(store, scanners,
majorCompaction ? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT, earliestPutTs,
request);
scanner = store.getCoprocessorHost()
.preCompactScannerOpen(store, scanners, scanType, earliestPutTs, request);
}
ScanType scanType = majorCompaction? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT;
if (scanner == null) {
Scan scan = new Scan();
scan.setMaxVersions(store.getFamily().getMaxVersions());

View File

@@ -408,7 +408,7 @@ public class HFileReadWriteTest {
// Include deletes
scanner = new StoreScanner(store, store.getScanInfo(), scan, scanners,
ScanType.MAJOR_COMPACT, Long.MIN_VALUE, Long.MIN_VALUE);
ScanType.COMPACT_DROP_DELETES, Long.MIN_VALUE, Long.MIN_VALUE);
ArrayList<KeyValue> kvs = new ArrayList<KeyValue>();

View File

@@ -32,7 +32,7 @@ public class NoOpScanPolicyObserver extends BaseRegionObserver {
Scan scan = new Scan();
scan.setMaxVersions(oldSI.getMaxVersions());
return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
ScanType.MINOR_COMPACT, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
/**

View File

@@ -555,7 +555,7 @@ public class TestStoreScanner extends TestCase {
KeyValue.COMPARATOR);
StoreScanner scanner =
new StoreScanner(scan, scanInfo,
ScanType.MAJOR_COMPACT, null, scanners,
ScanType.COMPACT_DROP_DELETES, null, scanners,
HConstants.OLDEST_TIMESTAMP);
List<KeyValue> results = new ArrayList<KeyValue>();
results = new ArrayList<KeyValue>();

View File

@@ -574,7 +574,7 @@ public class TestAccessController {
PrivilegedExceptionAction action = new PrivilegedExceptionAction() {
public Object run() throws Exception {
ACCESS_CONTROLLER.preCompact(ObserverContext.createAndPrepare(RCP_ENV, null), null, null,
ScanType.MINOR_COMPACT);
ScanType.COMPACT_RETAIN_DELETES);
return null;
}
};

View File

@@ -247,7 +247,7 @@ public class TestCoprocessorScanPolicy {
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
ScanType.MINOR_COMPACT, store.getSmallestReadPoint(),
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
HConstants.OLDEST_TIMESTAMP);
}