HBASE-17057 Minor compactions should also drop page cache (Ashu Pachauri)

Signed-off-by: Gary Helmling <garyh@apache.org>
This commit is contained in:
Ashu Pachauri 2016-11-30 15:47:59 -08:00 committed by Gary Helmling
parent a8fd1119ce
commit e7d16db2ac
2 changed files with 41 additions and 5 deletions

View File

@@ -802,6 +802,24 @@ possible configurations would overwhelm and obscure the important.
The value field assumes that the value of hbase.hregion.memstore.flush.size is unchanged from
the default.</description>
</property>
<property>
<name>hbase.regionserver.majorcompaction.pagecache.drop</name>
<value>true</value>
<description>Specifies whether to drop pages read/written into the system page cache by
major compactions. Setting it to true helps prevent major compactions from
polluting the page cache, which is almost always desirable, especially for clusters
with a low/moderate memory-to-storage ratio.</description>
</property>
<property>
<name>hbase.regionserver.minorcompaction.pagecache.drop</name>
<value>true</value>
<description>Specifies whether to drop pages read/written into the system page cache by
minor compactions. Setting it to true helps prevent minor compactions from
polluting the page cache, which is most beneficial on clusters with a low
memory-to-storage ratio or on very write-heavy clusters. You may want to set it to
false under a moderate-to-low write workload when the bulk of the reads are
on the most recently written data.</description>
</property>
<property>
<name>hbase.hstore.compaction.kv.max</name>
<value>10</value>

View File

@@ -79,6 +79,15 @@ public abstract class Compactor<T extends CellSink> {
/** specify how many days to keep MVCC values during major compaction **/
protected int keepSeqIdPeriod;
// Configs that drive whether we drop page cache behind compactions
protected static final String MAJOR_COMPACTION_DROP_CACHE =
"hbase.regionserver.majorcompaction.pagecache.drop";
protected static final String MINOR_COMPACTION_DROP_CACHE =
"hbase.regionserver.minorcompaction.pagecache.drop";
private boolean dropCacheMajor;
private boolean dropCacheMinor;
//TODO: depending on Store is not good but, realistically, all compactors currently do.
Compactor(final Configuration conf, final Store store) {
this.conf = conf;
@@ -89,8 +98,12 @@ public abstract class Compactor<T extends CellSink> {
Compression.Algorithm.NONE : this.store.getFamily().getCompactionCompressionType();
this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD,
HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD);
this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true);
this.dropCacheMinor = conf.getBoolean(MINOR_COMPACTION_DROP_CACHE, true);
}
protected interface CellSinkFactory<S> {
S createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind)
throws IOException;
@@ -271,6 +284,13 @@ public abstract class Compactor<T extends CellSink> {
List<StoreFileScanner> scanners;
Collection<StoreFile> readersToClose;
T writer = null;
boolean dropCache;
if (request.isMajor() || request.isAllFiles()) {
dropCache = this.dropCacheMajor;
} else {
dropCache = this.dropCacheMinor;
}
if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) {
// clone all StoreFiles, so we'll do the compaction on a independent copy of StoreFiles,
// HFiles, and their readers
@@ -282,12 +302,10 @@
clonedStoreFile.createReader();
readersToClose.add(clonedStoreFile);
}
scanners = createFileScanners(readersToClose, smallestReadPoint,
store.throttleCompaction(request.getSize()));
scanners = createFileScanners(readersToClose, smallestReadPoint, dropCache);
} else {
readersToClose = Collections.emptyList();
scanners = createFileScanners(request.getFiles(), smallestReadPoint,
store.throttleCompaction(request.getSize()));
scanners = createFileScanners(request.getFiles(), smallestReadPoint, dropCache);
}
InternalScanner scanner = null;
boolean finished = false;
@@ -309,7 +327,7 @@
smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
cleanSeqId = true;
}
writer = sinkFactory.createWriter(scanner, fd, store.throttleCompaction(request.getSize()));
writer = sinkFactory.createWriter(scanner, fd, dropCache);
finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId,
throughputController, request.isAllFiles(), request.getFiles().size());
if (!finished) {