/tmp/bucketcache.map.
*/
- public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY =
+ public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY =
"hbase.bucketcache.persistent.path";
/**
@@ -104,11 +111,11 @@ public class CacheConfig {
* as indices and blooms are kept in the lru blockcache and the data blocks in the
* bucket cache).
*/
- public static final String BUCKET_CACHE_COMBINED_KEY =
+ public static final String BUCKET_CACHE_COMBINED_KEY =
"hbase.bucketcache.combinedcache.enabled";
public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads";
- public static final String BUCKET_CACHE_WRITER_QUEUE_KEY =
+ public static final String BUCKET_CACHE_WRITER_QUEUE_KEY =
"hbase.bucketcache.writer.queuelength";
/**
@@ -154,6 +161,7 @@ public class CacheConfig {
memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache");
// TODO(eclark): Consider more. Redis, etc.
Class<? extends BlockCache> clazz;
+ @SuppressWarnings("unchecked")
ExternalBlockCaches(String clazzName) {
try {
clazz = (Class<? extends BlockCache>) Class.forName(clazzName);
@@ -449,7 +457,9 @@ public class CacheConfig {
* @return true if this {@link BlockCategory} should be compressed in blockcache, false otherwise
*/
public boolean shouldCacheCompressed(BlockCategory category) {
- if (!isBlockCacheEnabled()) return false;
+ if (!isBlockCacheEnabled()) {
+ return false;
+ }
switch (category) {
case DATA:
return this.cacheDataOnRead && this.cacheDataCompressed;
@@ -531,13 +541,13 @@ public class CacheConfig {
// Clear this if in tests you'd make more than one block cache instance.
@VisibleForTesting
static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE;
- private static LruBlockCache GLOBAL_L1_CACHE_INSTANCE;
+ private static FirstLevelBlockCache GLOBAL_L1_CACHE_INSTANCE;
/** Boolean whether we have disabled the block cache entirely. */
@VisibleForTesting
static boolean blockCacheDisabled = false;
- static long getLruCacheSize(final Configuration conf, final long xmx) {
+ static long getFirstLevelCacheSize(final Configuration conf, final long xmx) {
float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
if (cachePercentage <= 0.0001f) {
@@ -555,26 +565,37 @@ public class CacheConfig {
/**
* @param c Configuration to use.
- * @return An L1 instance. Currently an instance of LruBlockCache.
+ * @return An L1 instance
*/
- public static LruBlockCache getL1(final Configuration c) {
- return getL1(c, ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());
+ public static FirstLevelBlockCache getL1(final Configuration c) {
+ long xmx = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
+ long l1CacheSize = getFirstLevelCacheSize(c, xmx);
+ return getL1(l1CacheSize, c);
}
/**
- * @param c Configuration to use.
- * @param xmx Max heap memory
- * @return An L1 instance. Currently an instance of LruBlockCache.
+ * @param cacheSize the maximum size of the L1 cache, in bytes
+ * @param c Configuration to use.
+ * @return An L1 instance.
*/
- private synchronized static LruBlockCache getL1(final Configuration c, final long xmx) {
+ private synchronized static FirstLevelBlockCache getL1(long cacheSize, Configuration c) {
if (GLOBAL_L1_CACHE_INSTANCE != null) return GLOBAL_L1_CACHE_INSTANCE;
if (blockCacheDisabled) return null;
- long lruCacheSize = getLruCacheSize(c, xmx);
- if (lruCacheSize < 0) return null;
+ if (cacheSize < 0) {
+ return null;
+ }
+
+ String policy = c.get(HFILE_BLOCK_CACHE_POLICY_KEY, HFILE_BLOCK_CACHE_POLICY_DEFAULT);
int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
- LOG.info("Allocating LruBlockCache size=" +
- StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
- GLOBAL_L1_CACHE_INSTANCE = new LruBlockCache(lruCacheSize, blockSize, true, c);
+ LOG.info("Allocating BlockCache size=" +
+ StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
+
+ if (policy.equalsIgnoreCase("LRU")) {
+ GLOBAL_L1_CACHE_INSTANCE = new LruBlockCache(cacheSize, blockSize, true, c);
+ } else if (policy.equalsIgnoreCase("TinyLFU")) {
+ GLOBAL_L1_CACHE_INSTANCE = new TinyLfuBlockCache(
+ cacheSize, blockSize, ForkJoinPool.commonPool(), c);
+ } else {
+ throw new IllegalArgumentException("Unknown policy: " + policy);
+ }
return GLOBAL_L1_CACHE_INSTANCE;
}
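
For illustration, a minimal sketch of opting into the new policy from startup code; it assumes HFILE_BLOCK_CACHE_POLICY_KEY resolves to the string "hfile.block.cache.policy", which is not shown in this excerpt:

  Configuration conf = HBaseConfiguration.create();
  // Assumed key string; selects the TinyLFU-backed L1 ("LRU" keeps the old behavior).
  conf.set("hfile.block.cache.policy", "TinyLFU");
  // getL1 reads the policy and allocates either LruBlockCache or TinyLfuBlockCache.
  FirstLevelBlockCache l1 = CacheConfig.getL1(conf);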
@@ -601,7 +622,7 @@ public class CacheConfig {
}
private static BlockCache getExternalBlockcache(Configuration c) {
- Class klass = null;
+ Class<?> klass = null;
// Get the class from the config.
try {
@@ -629,7 +650,9 @@ public class CacheConfig {
private static BlockCache getBucketCache(Configuration c, long xmx) {
// Check for L2. ioengine name must be non-null.
String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null);
- if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null;
+ if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) {
+ return null;
+ }
int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
float bucketCachePercentage = c.getFloat(BUCKET_CACHE_SIZE_KEY, 0F);
@@ -679,29 +702,37 @@ public class CacheConfig {
* @return The block cache or null.
*/
public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
- if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
- if (blockCacheDisabled) return null;
+ if (GLOBAL_BLOCK_CACHE_INSTANCE != null) {
+ return GLOBAL_BLOCK_CACHE_INSTANCE;
+ }
+ if (blockCacheDisabled) {
+ return null;
+ }
+ // blockCacheDisabled is set as a side-effect of getFirstLevelCacheSize()
+ // so check it again after the call
long xmx = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
- LruBlockCache l1 = getL1(conf, xmx);
- // blockCacheDisabled is set as a side-effect of getL1(), so check it again after the call.
- if (blockCacheDisabled) return null;
+ long l1CacheSize = getFirstLevelCacheSize(conf, xmx);
+ if (blockCacheDisabled) {
+ return null;
+ }
BlockCache l2 = getL2(conf, xmx);
+ FirstLevelBlockCache l1 = getL1(l1CacheSize, conf);
if (l2 == null) {
GLOBAL_BLOCK_CACHE_INSTANCE = l1;
} else {
boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
- boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
+ boolean combinedWithL1 = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
DEFAULT_BUCKET_CACHE_COMBINED);
if (useExternal) {
GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2);
} else {
- if (combinedWithLru) {
+ if (combinedWithL1) {
GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2);
} else {
- // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler
- // mechanism. It is a little ugly but works according to the following: when the
- // background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get
- // a block from the L1 cache, if not in L1, we will search L2.
+ // L1 and L2 are not 'combined'. They are connected via the FirstLevelBlockCache
+ // victimhandler mechanism. It is a little ugly but works according to the following:
+ // when the background eviction thread runs, blocks evicted from L1 will go to L2 AND when
+ // we get a block from the L1 cache, if not in L1, we will search L2.
GLOBAL_BLOCK_CACHE_INSTANCE = l1;
}
}
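
The final else branch relies on the victim-cache hookup described in the comment above; the call that performs that wiring is not visible in this hunk. A hedged sketch of the intended effect:

  // Not combined: register L2 as L1's victim cache so that blocks evicted
  // from l1 spill into l2, and l1 misses fall through to l2 in getBlock.
  l1.setVictimCache(l2);
  GLOBAL_BLOCK_CACHE_INSTANCE = l1;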
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index 4ceda39cc0f..188900c8b57 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -30,24 +30,24 @@ import com.google.common.annotations.VisibleForTesting;
/**
* CombinedBlockCache is an abstraction layer that combines
- * {@link LruBlockCache} and {@link BucketCache}. The smaller lruCache is used
+ * {@link FirstLevelBlockCache} and {@link BucketCache}. The smaller l1Cache is used
* to cache bloom blocks and index blocks. The larger l2Cache is used to
* cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads
- * first from the smaller lruCache before looking for the block in the l2Cache. Blocks evicted
- * from lruCache are put into the bucket cache.
+ * first from the smaller l1Cache before looking for the block in the l2Cache. Blocks evicted
+ * from l1Cache are put into the bucket cache.
* Metrics are the combined size and hits and misses of both caches.
- *
+ *
*/
@InterfaceAudience.Private
public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
- protected final LruBlockCache lruCache;
+ protected final FirstLevelBlockCache l1Cache;
protected final BlockCache l2Cache;
protected final CombinedCacheStats combinedCacheStats;
- public CombinedBlockCache(LruBlockCache lruCache, BlockCache l2Cache) {
- this.lruCache = lruCache;
+ public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) {
+ this.l1Cache = l1Cache;
this.l2Cache = l2Cache;
- this.combinedCacheStats = new CombinedCacheStats(lruCache.getStats(),
+ this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(),
l2Cache.getStats());
}
@@ -57,7 +57,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
if (l2Cache instanceof HeapSize) {
l2size = ((HeapSize) l2Cache).heapSize();
}
- return lruCache.heapSize() + l2size;
+ return l1Cache.heapSize() + l2size;
}
@Override
@@ -65,7 +65,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
final boolean cacheDataInL1) {
boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA;
if (metaBlock || cacheDataInL1) {
- lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
+ l1Cache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
} else {
l2Cache.cacheBlock(cacheKey, buf, inMemory, false);
}
@@ -81,19 +81,19 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
boolean repeat, boolean updateCacheMetrics) {
// TODO: is there a hole here, or just awkwardness since in the lruCache getBlock
// we end up calling l2Cache.getBlock.
- return lruCache.containsBlock(cacheKey)?
- lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics):
+ return l1Cache.containsBlock(cacheKey)?
+ l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics):
l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
}
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
- return lruCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey);
+ return l1Cache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey);
}
@Override
public int evictBlocksByHfileName(String hfileName) {
- return lruCache.evictBlocksByHfileName(hfileName)
+ return l1Cache.evictBlocksByHfileName(hfileName)
+ l2Cache.evictBlocksByHfileName(hfileName);
}
@@ -104,28 +104,28 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
@Override
public void shutdown() {
- lruCache.shutdown();
+ l1Cache.shutdown();
l2Cache.shutdown();
}
@Override
public long size() {
- return lruCache.size() + l2Cache.size();
+ return l1Cache.size() + l2Cache.size();
}
@Override
public long getFreeSize() {
- return lruCache.getFreeSize() + l2Cache.getFreeSize();
+ return l1Cache.getFreeSize() + l2Cache.getFreeSize();
}
@Override
public long getCurrentSize() {
- return lruCache.getCurrentSize() + l2Cache.getCurrentSize();
+ return l1Cache.getCurrentSize() + l2Cache.getCurrentSize();
}
@Override
public long getBlockCount() {
- return lruCache.getBlockCount() + l2Cache.getBlockCount();
+ return l1Cache.getBlockCount() + l2Cache.getBlockCount();
}
public static class CombinedCacheStats extends CacheStats {
@@ -310,7 +310,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
lruCacheStats.rollMetricsPeriod();
bucketCacheStats.rollMetricsPeriod();
}
-
+
@Override
public long getFailedInserts() {
return lruCacheStats.getFailedInserts() + bucketCacheStats.getFailedInserts();
@@ -321,13 +321,13 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
return lruCacheStats.getSumHitCountsPastNPeriods()
+ bucketCacheStats.getSumHitCountsPastNPeriods();
}
-
+
@Override
public long getSumRequestCountsPastNPeriods() {
return lruCacheStats.getSumRequestCountsPastNPeriods()
+ bucketCacheStats.getSumRequestCountsPastNPeriods();
}
-
+
@Override
public long getSumHitCachingCountsPastNPeriods() {
return lruCacheStats.getSumHitCachingCountsPastNPeriods()
@@ -348,12 +348,12 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
@Override
public BlockCache[] getBlockCaches() {
- return new BlockCache [] {this.lruCache, this.l2Cache};
+ return new BlockCache [] {this.l1Cache, this.l2Cache};
}
@Override
public void setMaxSize(long size) {
- this.lruCache.setMaxSize(size);
+ this.l1Cache.setMaxSize(size);
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java
new file mode 100644
index 00000000000..6becd0ea248
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.HeapSize;
+
+/**
+ * In-memory BlockCache that may be backed by secondary layer(s).
+ */
+@InterfaceAudience.Private
+public interface FirstLevelBlockCache extends ResizableBlockCache, HeapSize {
+
+ /**
+ * Whether the cache contains the block with the specified cacheKey.
+ *
+ * @param cacheKey the cache key of the block to look for
+ * @return true if it contains the block
+ */
+ boolean containsBlock(BlockCacheKey cacheKey);
+
+ /**
+ * Specifies the secondary cache. An entry that is evicted from this cache due to a size
+ * constraint will be inserted into the victim cache.
+ *
+ * @param victimCache the second level cache
+ * @throws IllegalArgumentException if the victim cache has already been set
+ */
+ void setVictimCache(BlockCache victimCache);
+}
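
To make the setVictimCache contract concrete, a small usage sketch; the second LruBlockCache is a stand-in victim purely for illustration (in practice the victim is typically a BucketCache):

  Configuration conf = HBaseConfiguration.create();
  FirstLevelBlockCache l1 = new LruBlockCache(8 * 1024 * 1024, 64 * 1024, conf);
  BlockCache l2 = new LruBlockCache(32 * 1024 * 1024, 64 * 1024, conf);
  l1.setVictimCache(l2); // evictions from l1 are now offered to l2
  // A second setVictimCache(l2) call must throw IllegalArgumentException.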
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java
index 667e7b4c14b..160714ba863 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class InclusiveCombinedBlockCache extends CombinedBlockCache implements BlockCache {
- public InclusiveCombinedBlockCache(LruBlockCache l1, BlockCache l2) {
+ public InclusiveCombinedBlockCache(FirstLevelBlockCache l1, BlockCache l2) {
super(l1,l2);
}
@@ -34,7 +34,7 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache implements B
// On all external cache set ups the lru should have the l2 cache set as the victimHandler
// Because of that all requests that miss inside of the lru block cache will be
// tried in the l2 block cache.
- return lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
+ return l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
}
/**
@@ -50,7 +50,7 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache implements B
final boolean cacheDataInL1) {
// This is the inclusive part of the combined block cache.
// Every block is placed into both block caches.
- lruCache.cacheBlock(cacheKey, buf, inMemory, true);
+ l1Cache.cacheBlock(cacheKey, buf, inMemory, true);
// This assumes that insertion into the L2 block cache is either async or very fast.
l2Cache.cacheBlock(cacheKey, buf, inMemory, true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index f4545494a50..04700b9e216 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -18,6 +18,8 @@
*/
package org.apache.hadoop.hbase.io.hfile;
+import static java.util.Objects.requireNonNull;
+
import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
import java.util.EnumMap;
@@ -57,15 +59,15 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* {@link ConcurrentHashMap} and with a non-blocking eviction thread giving
* constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
- * Contains three levels of block priority to allow for scan-resistance and in-memory families
- * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An in-memory column
+ * Contains three levels of block priority to allow for scan-resistance and in-memory families
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An in-memory column
* family is a column family that should be served from memory if possible):
* single-access, multiple-accesses, and in-memory priority.
* A block is added with an in-memory priority flag if
* {@link org.apache.hadoop.hbase.HColumnDescriptor#isInMemory()}, otherwise a block becomes a
* single access priority the first time it is read into this block cache. If a block is
* accessed again while in cache, it is marked as a multiple access priority block. This
- * delineation of blocks is used to prevent scans from thrashing the cache adding a
+ * delineation of blocks is used to prevent scans from thrashing the cache adding a
* least-frequently-used element to the eviction algorithm.<p>
*
* Each priority is given its own chunk of the total cache to ensure
@@ -95,7 +97,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
*/
@InterfaceAudience.Private
@JsonIgnoreProperties({"encodingCountsForTest"})
-public class LruBlockCache implements ResizableBlockCache, HeapSize {
+public class LruBlockCache implements FirstLevelBlockCache {
private static final Log LOG = LogFactory.getLog(LruBlockCache.class);
@@ -238,8 +240,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
DEFAULT_MEMORY_FACTOR,
DEFAULT_HARD_CAPACITY_LIMIT_FACTOR,
false,
- DEFAULT_MAX_BLOCK_SIZE
- );
+ DEFAULT_MAX_BLOCK_SIZE);
}
public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) {
@@ -254,8 +255,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR),
conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR),
conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE),
- conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)
- );
+ conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE));
}
public LruBlockCache(long maxSize, long blockSize, Configuration conf) {
@@ -321,6 +321,14 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS);
}
+ @Override
+ public void setVictimCache(BlockCache victimCache) {
+ if (victimHandler != null) {
+ throw new IllegalArgumentException("The victim cache has already been set");
+ }
+ victimHandler = requireNonNull(victimCache);
+ }
+
@Override
public void setMaxSize(long maxSize) {
this.maxSize = maxSize;
@@ -434,6 +442,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
* @param cacheKey block's cache key
* @param buf block buffer
*/
+ @Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
cacheBlock(cacheKey, buf, false, false);
}
@@ -498,6 +507,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
* @param cacheKey
* @return true if contains the block
*/
+ @Override
public boolean containsBlock(BlockCacheKey cacheKey) {
return map.containsKey(cacheKey);
}
@@ -785,6 +795,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
return totalSize;
}
+ @Override
public int compareTo(BlockBucket that) {
return Long.compare(this.overflow(), that.overflow());
}
@@ -946,6 +957,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
*<p>
* * Each priority is given its own chunk of the total cache to ensure @@ -95,7 +97,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; */ @InterfaceAudience.Private @JsonIgnoreProperties({"encodingCountsForTest"}) -public class LruBlockCache implements ResizableBlockCache, HeapSize { +public class LruBlockCache implements FirstLevelBlockCache { private static final Log LOG = LogFactory.getLog(LruBlockCache.class); @@ -238,8 +240,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { DEFAULT_MEMORY_FACTOR, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, - DEFAULT_MAX_BLOCK_SIZE - ); + DEFAULT_MAX_BLOCK_SIZE); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { @@ -254,8 +255,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), - conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE) - ); + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); } public LruBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -321,6 +321,14 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS); } + @Override + public void setVictimCache(BlockCache victimCache) { + if (victimHandler != null) { + throw new IllegalArgumentException("The victim cache has already been set"); + } + victimHandler = requireNonNull(victimCache); + } + @Override public void setMaxSize(long maxSize) { this.maxSize = maxSize; @@ -434,6 +442,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * @param cacheKey block's cache key * @param buf block buffer */ + @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { cacheBlock(cacheKey, buf, false, false); } @@ -498,6 +507,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * @param cacheKey * @return true if contains the block */ + @Override public boolean containsBlock(BlockCacheKey cacheKey) { return map.containsKey(cacheKey); } @@ -785,6 +795,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { return totalSize; } + @Override public int compareTo(BlockBucket that) { return Long.compare(this.overflow(), that.overflow()); } @@ -946,6 +957,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { *
Includes: total accesses, hits, misses, evicted blocks, and runs
* of the eviction processes.
*/
+ @Override
public CacheStats getStats() {
return this.stats;
}
@@ -1074,6 +1086,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
return (long)Math.floor(this.maxSize * this.memoryFactor * this.minFactor);
}
+ @Override
public void shutdown() {
if (victimHandler != null)
victimHandler.shutdown();
@@ -1122,7 +1135,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
Map<BlockType, Integer> counts =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
* <p>BucketCache can be used as mainly a block cache (see
* {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
- * LruBlockCache to decrease CMS GC and heap fragmentation.
+ * a BlockCache to decrease CMS GC and heap fragmentation.
*
* It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
- * blocks) to enlarge cache space via
- * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache#setVictimCache}
+ * blocks) to enlarge cache space via a victim cache.
*/
@InterfaceAudience.Private
public class BucketCache implements BlockCache, HeapSize {
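
For context, a hedged sketch of enabling a BucketCache L2 beneath the L1 cache. The combined-cache and writer keys appear earlier in this patch; the ioengine and size key strings ("hbase.bucketcache.ioengine", "hbase.bucketcache.size") are the conventional values of the referenced constants, assumed here rather than shown:

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.bucketcache.ioengine", "offheap"); // non-null ioengine enables L2
  conf.setFloat("hbase.bucketcache.size", 0.4f); // illustrative sizing value
  conf.setBoolean("hbase.bucketcache.combinedcache.enabled", true); // meta in L1, data in L2
  conf.setInt("hbase.bucketcache.writer.threads", 3);
  conf.setInt("hbase.bucketcache.writer.queuelength", 64);
  BlockCache cache = CacheConfig.instantiateBlockCache(conf);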
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index d9d52613dd6..bf44d339cf3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -37,12 +37,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.testclassification.IOTests;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.Before;
@@ -328,7 +328,7 @@ public class TestCacheConfig {
BlockCache [] bcs = cbc.getBlockCaches();
assertTrue(bcs[0] instanceof LruBlockCache);
LruBlockCache lbc = (LruBlockCache)bcs[0];
- assertEquals(CacheConfig.getLruCacheSize(this.conf,
+ assertEquals(CacheConfig.getFirstLevelCacheSize(this.conf,
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()), lbc.getMaxSize());
assertTrue(bcs[1] instanceof BucketCache);
BucketCache bc = (BucketCache)bcs[1];
@@ -347,7 +347,7 @@ public class TestCacheConfig {
// from L1 happens, it does not fail because L2 can't take the eviction because block too big.
this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
- long lruExpectedSize = CacheConfig.getLruCacheSize(this.conf, mu.getMax());
+ long lruExpectedSize = CacheConfig.getFirstLevelCacheSize(this.conf, mu.getMax());
final int bcSize = 100;
long bcExpectedSize = 100 * 1024 * 1024; // MB.
assertTrue(lruExpectedSize < bcExpectedSize);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java
new file mode 100644
index 00000000000..f06da8c74ac
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java
@@ -0,0 +1,304 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.nio.ByteBuffer;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests the concurrent TinyLfuBlockCache.
+ */
+@Category({IOTests.class, SmallTests.class})
+public class TestTinyLfuBlockCache {
+
+ @Test
+ public void testCacheSimple() throws Exception {
+
+ long maxSize = 1000000;
+ long blockSize = calculateBlockSizeDefault(maxSize, 101);
+
+ TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run);
+
+ CachedItem [] blocks = generateRandomBlocks(100, blockSize);
+
+ long expectedCacheSize = cache.heapSize();
+
+ // Confirm empty
+ for (CachedItem block : blocks) {
+ assertTrue(cache.getBlock(block.cacheKey, true, false, true) == null);
+ }
+
+ // Add blocks
+ for (CachedItem block : blocks) {
+ cache.cacheBlock(block.cacheKey, block);
+ expectedCacheSize += block.heapSize();
+ }
+
+ // Verify correctly calculated cache heap size
+ assertEquals(expectedCacheSize, cache.heapSize());
+
+ // Check if all blocks are properly cached and retrieved
+ for (CachedItem block : blocks) {
+ HeapSize buf = cache.getBlock(block.cacheKey, true, false, true);
+ assertTrue(buf != null);
+ assertEquals(buf.heapSize(), block.heapSize());
+ }
+
+ // Re-add same blocks and ensure nothing has changed
+ long expectedBlockCount = cache.getBlockCount();
+ for (CachedItem block : blocks) {
+ cache.cacheBlock(block.cacheKey, block);
+ }
+ assertEquals(
+ "Cache should ignore cache requests for blocks already in cache",
+ expectedBlockCount, cache.getBlockCount());
+
+ // Verify correctly calculated cache heap size
+ assertEquals(expectedCacheSize, cache.heapSize());
+
+ // Check if all blocks are properly cached and retrieved
+ for (CachedItem block : blocks) {
+ HeapSize buf = cache.getBlock(block.cacheKey, true, false, true);
+ assertTrue(buf != null);
+ assertEquals(buf.heapSize(), block.heapSize());
+ }
+
+ // Expect no evictions
+ assertEquals(0, cache.getStats().getEvictionCount());
+ }
+
+ @Test
+ public void testCacheEvictionSimple() throws Exception {
+
+ long maxSize = 100000;
+ long blockSize = calculateBlockSizeDefault(maxSize, 10);
+
+ TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run);
+
+ CachedItem [] blocks = generateFixedBlocks(11, blockSize, "block");
+
+ // Add all the blocks
+ for (CachedItem block : blocks) {
+ cache.cacheBlock(block.cacheKey, block);
+ }
+
+ // A single eviction run should have occurred
+ assertEquals(1, cache.getStats().getEvictionCount());
+
+ // The cache did not grow beyond max
+ assertTrue(cache.heapSize() < maxSize);
+
+ // All blocks except one should be in the cache
+ assertEquals(10, cache.getBlockCount());
+ }
+
+ @Test
+ public void testScanResistance() throws Exception {
+
+ long maxSize = 100000;
+ long blockSize = calculateBlockSize(maxSize, 10);
+
+ TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run);
+
+ CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single");
+ CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi");
+
+ // Add 5 blocks from each
+ for(int i=0; i<5; i++) {
+ cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
+ cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
+ }
+
+ // Add frequency
+ for (int i = 0; i < 5; i++) {
+ for (int j = 0; j < 10; j++) {
+ CachedItem block = multiBlocks[i];
+ cache.getBlock(block.cacheKey, true, false, true);
+ }
+ }
+
+ // Let's keep "scanning" by adding single blocks. From here on we only
+ // expect evictions from the single bucket.
+
+ for(int i=5;i<18;i++) {
+ cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
+ }
+
+ for (CachedItem block : multiBlocks) {
+ assertTrue(cache.cache.asMap().containsKey(block.cacheKey));
+ }
+
+ assertEquals(10, cache.getBlockCount());
+ assertEquals(13, cache.getStats().getEvictionCount());
+
+ }
+
+ @Test
+ public void testMaxBlockSize() throws Exception {
+ long maxSize = 100000;
+ long blockSize = calculateBlockSize(maxSize, 10);
+
+ TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run);
+ CachedItem [] tooLong = generateFixedBlocks(10, 2 * blockSize, "long");
+ CachedItem [] small = generateFixedBlocks(15, blockSize / 2, "small");
+
+ for (CachedItem i:tooLong) {
+ cache.cacheBlock(i.cacheKey, i);
+ }
+ for (CachedItem i:small) {
+ cache.cacheBlock(i.cacheKey, i);
+ }
+ assertEquals(15,cache.getBlockCount());
+ for (CachedItem i:small) {
+ assertNotNull(cache.getBlock(i.cacheKey, true, false, false));
+ }
+ for (CachedItem i:tooLong) {
+ assertNull(cache.getBlock(i.cacheKey, true, false, false));
+ }
+
+ assertEquals(10, cache.getStats().getFailedInserts());
+ }
+
+ @Test
+ public void testResizeBlockCache() throws Exception {
+
+ long maxSize = 100000;
+ long blockSize = calculateBlockSize(maxSize, 10);
+
+ TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run);
+
+ CachedItem [] blocks = generateFixedBlocks(10, blockSize, "block");
+
+ for(CachedItem block : blocks) {
+ cache.cacheBlock(block.cacheKey, block);
+ }
+
+ // Do not expect any evictions yet
+ assertEquals(10, cache.getBlockCount());
+ assertEquals(0, cache.getStats().getEvictionCount());
+
+ // Resize to half capacity plus an extra block (otherwise we evict an extra)
+ cache.setMaxSize(maxSize / 2);
+
+ // And we expect 1/2 of the blocks to be evicted
+ assertEquals(5, cache.getBlockCount());
+ assertEquals(5, cache.getStats().getEvictedCount());
+ }
+
+ private CachedItem [] generateFixedBlocks(int numBlocks, int size, String pfx) {
+ CachedItem [] blocks = new CachedItem[numBlocks];
+ for (int i = 0; i < numBlocks; i++) {
+ blocks[i] = new CachedItem(pfx + i, size);
+ }
+ return blocks;
+ }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
new file mode 100644
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
+/**
+ * A block cache that is memory-aware using {@link HeapSize}, memory bounded using the
+ * W-TinyLFU eviction algorithm, and concurrent. This implementation delegates to a
+ * Caffeine cache to provide O(1) read and write operations.
+ */
+@InterfaceAudience.Private
+public final class TinyLfuBlockCache implements FirstLevelBlockCache {
+ private static final Log LOG = LogFactory.getLog(TinyLfuBlockCache.class);
+
+ private static final String MAX_BLOCK_SIZE = "hbase.tinylfu.max.block.size";
+ private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L;
+ private static final int STAT_THREAD_PERIOD_SECONDS = 5 * 60;
+
+ private final Eviction<BlockCacheKey, Cacheable> policy;
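
For orientation, constructing the TinyLFU cache the way the tests above do; the arguments are maximum cache size, average block size, maximum cacheable block size, and the executor used for cache maintenance (the tests pass Runnable::run to run maintenance inline, while CacheConfig passes ForkJoinPool.commonPool()):

  TinyLfuBlockCache cache =
      new TinyLfuBlockCache(1_000_000, 64 * 1024, 64 * 1024, Runnable::run);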