diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 0324bd5da8f..40cf6755f89 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -802,6 +802,24 @@ possible configurations would overwhelm and obscure the important. The value field assumes that the value of hbase.hregion.memstore.flush.size is unchanged from the default. + + hbase.regionserver.majorcompaction.pagecache.drop + true + Specifies whether to drop pages read/written into the system page cache by + major compactions. Setting it to true helps prevent major compactions from + polluting the page cache, which is almost always required, especially for clusters + with low/moderate memory-to-storage ratio. + + + hbase.regionserver.minorcompaction.pagecache.drop + true + Specifies whether to drop pages read/written into the system page cache by + minor compactions. Setting it to true helps prevent minor compactions from + polluting the page cache, which is most beneficial on clusters with a low + memory-to-storage ratio or very write-heavy clusters. You may want to set it to + false under a moderate to low write workload when the bulk of the reads is + on the most recently written data. 
+ hbase.hstore.compaction.kv.max 10 diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index e1c08bd9584..84e43147bbb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -79,6 +79,15 @@ public abstract class Compactor { /** specify how many days to keep MVCC values during major compaction **/ protected int keepSeqIdPeriod; + // Configs that drive whether we drop page cache behind compactions + protected static final String MAJOR_COMPACTION_DROP_CACHE = + "hbase.regionserver.majorcompaction.pagecache.drop"; + protected static final String MINOR_COMPACTION_DROP_CACHE = + "hbase.regionserver.minorcompaction.pagecache.drop"; + + private boolean dropCacheMajor; + private boolean dropCacheMinor; + //TODO: depending on Store is not good but, realistically, all compactors currently do. 
Compactor(final Configuration conf, final Store store) { this.conf = conf; @@ -89,8 +98,12 @@ public abstract class Compactor { Compression.Algorithm.NONE : this.store.getFamily().getCompactionCompressionType(); this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD, HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD); + this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true); + this.dropCacheMinor = conf.getBoolean(MINOR_COMPACTION_DROP_CACHE, true); } + + protected interface CellSinkFactory { S createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind) throws IOException; @@ -271,6 +284,13 @@ public abstract class Compactor { List scanners; Collection readersToClose; T writer = null; + boolean dropCache; + if (request.isMajor() || request.isAllFiles()) { + dropCache = this.dropCacheMajor; + } else { + dropCache = this.dropCacheMinor; + } + if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) { // clone all StoreFiles, so we'll do the compaction on a independent copy of StoreFiles, // HFiles, and their readers @@ -282,12 +302,10 @@ public abstract class Compactor { clonedStoreFile.createReader(); readersToClose.add(clonedStoreFile); } - scanners = createFileScanners(readersToClose, smallestReadPoint, - store.throttleCompaction(request.getSize())); + scanners = createFileScanners(readersToClose, smallestReadPoint, dropCache); } else { readersToClose = Collections.emptyList(); - scanners = createFileScanners(request.getFiles(), smallestReadPoint, - store.throttleCompaction(request.getSize())); + scanners = createFileScanners(request.getFiles(), smallestReadPoint, dropCache); } InternalScanner scanner = null; boolean finished = false; @@ -309,7 +327,7 @@ public abstract class Compactor { smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint); cleanSeqId = true; } - writer = sinkFactory.createWriter(scanner, fd, 
store.throttleCompaction(request.getSize())); + writer = sinkFactory.createWriter(scanner, fd, dropCache); finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId, throughputController, request.isAllFiles(), request.getFiles().size()); if (!finished) {