From d952c5df9f73f365888bfe22da043935cfe7fe5a Mon Sep 17 00:00:00 2001 From: anoopsamjohn Date: Wed, 6 Dec 2017 11:26:10 +0530 Subject: [PATCH] HBASE-19357 Bucket cache no longer L2 for LRU cache. --- .../hadoop/hbase/HColumnDescriptor.java | 18 +-- .../hbase/client/ColumnFamilyDescriptor.java | 6 +- .../client/ColumnFamilyDescriptorBuilder.java | 36 ----- .../hbase/client/TableDescriptorBuilder.java | 3 - .../TestColumnFamilyDescriptorBuilder.java | 3 +- .../TestImmutableHColumnDescriptor.java | 1 - .../src/main/resources/hbase-default.xml | 7 - .../hbase/io/hfile/MemcachedBlockCache.java | 11 +- .../hadoop/hbase/io/hfile/BlockCache.java | 7 +- .../hadoop/hbase/io/hfile/CacheConfig.java | 153 ++++++------------ .../hbase/io/hfile/CombinedBlockCache.java | 60 +++---- .../hbase/io/hfile/HFileReaderImpl.java | 5 +- .../io/hfile/InclusiveCombinedBlockCache.java | 23 +-- .../hadoop/hbase/io/hfile/LruBlockCache.java | 40 ++--- .../hbase/io/hfile/bucket/BucketCache.java | 25 ++- .../hadoop/hbase/io/util/MemorySizeUtil.java | 2 +- .../hbase/regionserver/HeapMemoryManager.java | 6 +- .../MetricsRegionServerWrapperImpl.java | 4 +- .../security/access/AccessController.java | 5 +- .../hadoop/hbase/util/FSTableDescriptors.java | 15 -- .../hbase/io/hfile/TestCacheConfig.java | 67 ++------ .../hbase/io/hfile/TestCacheOnWrite.java | 2 +- .../hbase/io/hfile/TestLruBlockCache.java | 30 ++-- .../io/hfile/bucket/TestBucketCache.java | 5 +- .../regionserver/TestHeapMemoryManager.java | 3 +- 25 files changed, 159 insertions(+), 378 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 7f85dc7f874..be59f8ca67c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -52,7 +52,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparablehbase(main):003:0> create 't', - * {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}} - */ - @InterfaceAudience.Private - public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1"; - private static final Bytes CACHE_DATA_IN_L1_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_IN_L1)); /** * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, @@ -228,13 +220,6 @@ public class ColumnFamilyDescriptorBuilder { */ public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; - /** - * Default setting for whether to cache data blocks in L1 tier. Only makes - * sense if more than one tier in operations: i.e. if we have an L1 and a L2. - * This will be the cases if we are using BucketCache. - */ - public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false; - /** * Default setting for whether to cache index blocks on write if block caching * is enabled. 
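The hunks above drop the CACHE_DATA_IN_L1 attribute and its DEFAULT_CACHE_DATA_IN_L1 constant from the column family descriptor. For orientation only, a minimal sketch (hypothetical class name, assuming the hbase-client types shown in this patch) of how a family is declared once the setter is gone; which tier a block lands in is now decided by the block cache itself, per block type, rather than per family:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyWithoutL1Hint {  // hypothetical example, not part of this patch
      static ColumnFamilyDescriptor infoFamily() {
        // There is no setCacheDataInL1() any more: data blocks go to the L2 tier
        // (BucketCache or an external cache) and index/bloom blocks stay in the
        // on-heap LruBlockCache.
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setInMemory(true)           // still honoured: in-memory priority in the on-heap LRU
            .setCacheDataOnWrite(false)  // cache-on-write flags are unchanged by this patch
            .build();
      }
    }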
@@ -310,7 +295,6 @@ public class ColumnFamilyDescriptorBuilder { DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1)); DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE)); DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE)); DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE)); @@ -444,11 +428,6 @@ public class ColumnFamilyDescriptorBuilder { return this; } - public ColumnFamilyDescriptorBuilder setCacheDataInL1(boolean value) { - desc.setCacheDataInL1(value); - return this; - } - public ColumnFamilyDescriptorBuilder setCacheDataOnWrite(boolean value) { desc.setCacheDataOnWrite(value); return this; @@ -1010,21 +989,6 @@ public class ColumnFamilyDescriptorBuilder { return setValue(CACHE_DATA_ON_WRITE_BYTES, Boolean.toString(value)); } - @Override - public boolean isCacheDataInL1() { - return getStringOrDefault(CACHE_DATA_IN_L1_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_IN_L1); - } - - /** - * @param value true if we should cache data blocks in the L1 cache (if - * block cache deploy has more than one tier; e.g. we are using - * CombinedBlockCache). - * @return this (for chained invocation) - */ - public ModifyableColumnFamilyDescriptor setCacheDataInL1(boolean value) { - return setValue(CACHE_DATA_IN_L1_BYTES, Boolean.toString(value)); - } - @Override public boolean isCacheIndexesOnWrite() { return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_INDEX_ON_WRITE); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index ef593113245..e22153ab31e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -226,9 +226,6 @@ public class TableDescriptorBuilder { .setInMemory(true) .setBlocksize(8 * 1024) .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). 
- .setCacheDataInL1(true) .build()) .build(); private final ModifyableTableDescriptor desc; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index 671cb14c2b1..42541395e50 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -45,8 +45,7 @@ public class TestColumnFamilyDescriptorBuilder { = ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) .setInMemory(true) .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setBloomFilterType(BloomType.NONE) - .setCacheDataInL1(true); + .setBloomFilterType(BloomType.NONE); final int v = 123; builder.setBlocksize(v); builder.setTimeToLive(v); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java index cea2c7a953f..79ec9593183 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java @@ -51,7 +51,6 @@ public class TestImmutableHColumnDescriptor { hcd -> hcd.setBlocksize(10), hcd -> hcd.setBloomFilterType(BloomType.NONE), hcd -> hcd.setCacheBloomsOnWrite(false), - hcd -> hcd.setCacheDataInL1(true), hcd -> hcd.setCacheDataOnWrite(true), hcd -> hcd.setCacheIndexesOnWrite(true), hcd -> hcd.setCompactionCompressionType(Compression.Algorithm.LZO), diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index c9fe83bca11..3094ed14bf5 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -900,13 +900,6 @@ possible configurations would overwhelm and obscure the important. See http://hbase.apache.org/book.html#offheap.blockcache for more information. - - hbase.bucketcache.combinedcache.enabled - true - Whether or not the bucketcache is used in league with the LRU - on-heap block cache. In this mode, indices and blooms are kept in the LRU - blockcache and the data blocks are kept in the bucketcache. - hbase.bucketcache.size diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java index c05499c34e2..b12ac1dd632 100644 --- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -110,10 +110,7 @@ public class MemcachedBlockCache implements BlockCache { } @Override - public void cacheBlock(BlockCacheKey cacheKey, - Cacheable buf, - boolean inMemory, - boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { cacheBlock(cacheKey, buf); } @@ -288,10 +285,4 @@ public class MemcachedBlockCache implements BlockCache { return MAX_SIZE; } } - - @Override - public void returnBlock(BlockCacheKey cacheKey, Cacheable block) { - // Not doing reference counting. 
All blocks here are EXCLUSIVE - } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 20ec8eee201..103d1136304 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -34,11 +34,8 @@ public interface BlockCache extends Iterable { * @param cacheKey The block's cache key. * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory - * @param cacheDataInL1 If multi-tier block cache deploy -- i.e. has an L1 and L2 tier -- then - * if this flag is true, cache data blocks up in the L1 tier (meta blocks are probably being - * cached in L1 already). */ - void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, boolean cacheDataInL1); + void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory); /** * Add block to cache (defaults to not in-memory). @@ -146,5 +143,5 @@ public interface BlockCache extends Iterable { * @param cacheKey the cache key of the block * @param block the hfileblock to be returned */ - void returnBlock(BlockCacheKey cacheKey, Cacheable block); + default void returnBlock(BlockCacheKey cacheKey, Cacheable block){}; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index b89205c277f..a071fbdc89b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; @@ -109,14 +108,6 @@ public class CacheConfig { public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; - /** - * If the bucket cache is used in league with the lru on-heap block cache (meta blocks such - * as indices and blooms are kept in the lru blockcache and the data blocks in the - * bucket cache). - */ - public static final String BUCKET_CACHE_COMBINED_KEY = - "hbase.bucketcache.combinedcache.enabled"; - public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads"; public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength"; @@ -129,7 +120,6 @@ public class CacheConfig { /** * Defaults for Bucket cache */ - public static final boolean DEFAULT_BUCKET_CACHE_COMBINED = true; public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3; public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64; @@ -218,13 +208,6 @@ public class CacheConfig { /** Whether data blocks should be prefetched into the cache */ private final boolean prefetchOnOpen; - /** - * If true and if more than one tier in this cache deploy -- e.g. CombinedBlockCache has an L1 - * and an L2 tier -- then cache data blocks up in the L1 tier (The meta blocks are likely being - * cached up in L1 already. 
At least this is the case if CombinedBlockCache). - */ - private boolean cacheDataInL1; - private final boolean dropBehindCompaction; /** @@ -251,8 +234,6 @@ public class CacheConfig { conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED), conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(), - conf.getBoolean(ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1, - ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(), conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT) ); LOG.info("Created cacheConfig for " + family.getNameAsString() + ": " + this); @@ -276,8 +257,6 @@ public class CacheConfig { conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE), conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED), conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN), - conf.getBoolean(ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1, - ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1), conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT) ); LOG.info("Created cacheConfig: " + this); @@ -305,7 +284,7 @@ public class CacheConfig { final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite, final boolean cacheBloomsOnWrite, final boolean evictOnClose, final boolean cacheDataCompressed, final boolean prefetchOnOpen, - final boolean cacheDataInL1, final boolean dropBehindCompaction) { + final boolean dropBehindCompaction) { this.blockCache = blockCache; this.cacheDataOnRead = cacheDataOnRead; this.inMemory = inMemory; @@ -315,7 +294,6 @@ public class CacheConfig { this.evictOnClose = evictOnClose; this.cacheDataCompressed = cacheDataCompressed; this.prefetchOnOpen = prefetchOnOpen; - this.cacheDataInL1 = cacheDataInL1; this.dropBehindCompaction = dropBehindCompaction; } @@ -328,12 +306,11 @@ public class CacheConfig { cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite, cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose, cacheConf.cacheDataCompressed, cacheConf.prefetchOnOpen, - cacheConf.cacheDataInL1, cacheConf.dropBehindCompaction); + cacheConf.dropBehindCompaction); } private CacheConfig() { - this(null, false, false, false, false, false, - false, false, false, false, false); + this(null, false, false, false, false, false, false, false, false, false); } /** @@ -386,13 +363,6 @@ public class CacheConfig { return isBlockCacheEnabled() && this.inMemory; } - /** - * @return True if cache data blocks in L1 tier (if more than one tier in block cache deploy). - */ - public boolean isCacheDataInL1() { - return isBlockCacheEnabled() && this.cacheDataInL1; - } - /** * @return true if data blocks should be written to the cache when an HFile is * written, false if not @@ -411,16 +381,6 @@ public class CacheConfig { this.cacheDataOnWrite = cacheDataOnWrite; } - /** - * Only used for testing. - * @param cacheDataInL1 Whether to cache data blocks up in l1 (if a multi-tier cache - * implementation). - */ - @VisibleForTesting - public void setCacheDataInL1(boolean cacheDataInL1) { - this.cacheDataInL1 = cacheDataInL1; - } - /** * @return true if index blocks should be written to the cache when an HFile * is written, false if not @@ -547,8 +507,8 @@ public class CacheConfig { // Clear this if in tests you'd make more than one block cache instance. 
@VisibleForTesting static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE; - private static LruBlockCache GLOBAL_L1_CACHE_INSTANCE = null; - private static BlockCache GLOBAL_L2_CACHE_INSTANCE = null; + private static LruBlockCache ONHEAP_CACHE_INSTANCE = null; + private static BlockCache L2_CACHE_INSTANCE = null;// Can be BucketCache or External cache. /** Boolean whether we have disabled the block cache entirely. */ @VisibleForTesting @@ -558,20 +518,20 @@ public class CacheConfig { * @param c Configuration to use. * @return An L1 instance. Currently an instance of LruBlockCache. */ - public static LruBlockCache getL1(final Configuration c) { - return getL1Internal(c); + public static LruBlockCache getOnHeapCache(final Configuration c) { + return getOnHeapCacheInternal(c); } - public CacheStats getL1Stats() { - if (GLOBAL_L1_CACHE_INSTANCE != null) { - return GLOBAL_L1_CACHE_INSTANCE.getStats(); + public CacheStats getOnHeapCacheStats() { + if (ONHEAP_CACHE_INSTANCE != null) { + return ONHEAP_CACHE_INSTANCE.getStats(); } return null; } - public CacheStats getL2Stats() { - if (GLOBAL_L2_CACHE_INSTANCE != null) { - return GLOBAL_L2_CACHE_INSTANCE.getStats(); + public CacheStats getL2CacheStats() { + if (L2_CACHE_INSTANCE != null) { + return L2_CACHE_INSTANCE.getStats(); } return null; } @@ -580,43 +540,26 @@ public class CacheConfig { * @param c Configuration to use. * @return An L1 instance. Currently an instance of LruBlockCache. */ - private synchronized static LruBlockCache getL1Internal(final Configuration c) { - if (GLOBAL_L1_CACHE_INSTANCE != null) return GLOBAL_L1_CACHE_INSTANCE; - final long lruCacheSize = MemorySizeUtil.getLruCacheSize(c); - if (lruCacheSize < 0) { + private synchronized static LruBlockCache getOnHeapCacheInternal(final Configuration c) { + if (ONHEAP_CACHE_INSTANCE != null) { + return ONHEAP_CACHE_INSTANCE; + } + final long cacheSize = MemorySizeUtil.getOnHeapCacheSize(c); + if (cacheSize < 0) { blockCacheDisabled = true; } if (blockCacheDisabled) return null; int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); - LOG.info("Allocating LruBlockCache size=" + - StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); - GLOBAL_L1_CACHE_INSTANCE = new LruBlockCache(lruCacheSize, blockSize, true, c); - return GLOBAL_L1_CACHE_INSTANCE; - } - - /** - * @param c Configuration to use. - * @return Returns L2 block cache instance (for now it is BucketCache BlockCache all the time) - * or null if not supposed to be a L2. - */ - @VisibleForTesting - static BlockCache getL2(final Configuration c) { - final boolean useExternal = c.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); - if (LOG.isDebugEnabled()) { - LOG.debug("Trying to use " + (useExternal?" External":" Internal") + " l2 cache"); - } - - // If we want to use an external block cache then create that. - if (useExternal) { - GLOBAL_L2_CACHE_INSTANCE = getExternalBlockcache(c); - } else { - // otherwise use the bucket cache. 
- GLOBAL_L2_CACHE_INSTANCE = getBucketCache(c); - } - return GLOBAL_L2_CACHE_INSTANCE; + LOG.info("Allocating On heap LruBlockCache size=" + + StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); + ONHEAP_CACHE_INSTANCE = new LruBlockCache(cacheSize, blockSize, true, c); + return ONHEAP_CACHE_INSTANCE; } private static BlockCache getExternalBlockcache(Configuration c) { + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to use External l2 cache"); + } Class klass = null; // Get the class, from the config. s @@ -642,7 +585,8 @@ public class CacheConfig { } - private static BlockCache getBucketCache(Configuration c) { + @VisibleForTesting + static BucketCache getBucketCache(Configuration c) { // Check for L2. ioengine name must be non-null. String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null; @@ -705,30 +649,25 @@ public class CacheConfig { public static synchronized BlockCache instantiateBlockCache(Configuration conf) { if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE; if (blockCacheDisabled) return null; - LruBlockCache l1 = getL1Internal(conf); - // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the call. + LruBlockCache onHeapCache = getOnHeapCacheInternal(conf); + // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the + // call. if (blockCacheDisabled) return null; - BlockCache l2 = getL2(conf); - if (l2 == null) { - GLOBAL_BLOCK_CACHE_INSTANCE = l1; + boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); + if (useExternal) { + L2_CACHE_INSTANCE = getExternalBlockcache(conf); + GLOBAL_BLOCK_CACHE_INSTANCE = L2_CACHE_INSTANCE == null ? onHeapCache + : new InclusiveCombinedBlockCache(onHeapCache, L2_CACHE_INSTANCE); } else { - boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); - boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY, - DEFAULT_BUCKET_CACHE_COMBINED); - if (useExternal) { - GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2); - } else { - if (combinedWithLru) { - GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2); - } else { - // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler - // mechanism. It is a little ugly but works according to the following: when the - // background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get - // a block from the L1 cache, if not in L1, we will search L2. - GLOBAL_BLOCK_CACHE_INSTANCE = l1; - } + // otherwise use the bucket cache. + L2_CACHE_INSTANCE = getBucketCache(conf); + if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) { + // Non combined mode is off from 2.0 + LOG.warn( + "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); } - l1.setVictimCache(l2); + GLOBAL_BLOCK_CACHE_INSTANCE = L2_CACHE_INSTANCE == null ? onHeapCache + : new CombinedBlockCache(onHeapCache, L2_CACHE_INSTANCE); } return GLOBAL_BLOCK_CACHE_INSTANCE; } @@ -736,8 +675,8 @@ public class CacheConfig { // Supposed to use only from tests. 
Some tests want to reinit the Global block cache instance @VisibleForTesting static synchronized void clearGlobalInstances() { - GLOBAL_L1_CACHE_INSTANCE = null; - GLOBAL_L2_CACHE_INSTANCE = null; + ONHEAP_CACHE_INSTANCE = null; + L2_CACHE_INSTANCE = null; GLOBAL_BLOCK_CACHE_INSTANCE = null; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 201a41bc70d..6dd69074e8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -31,23 +31,21 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe /** * CombinedBlockCache is an abstraction layer that combines * {@link LruBlockCache} and {@link BucketCache}. The smaller lruCache is used - * to cache bloom blocks and index blocks. The larger l2Cache is used to + * to cache bloom blocks and index blocks. The larger Cache is used to * cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads - * first from the smaller lruCache before looking for the block in the l2Cache. Blocks evicted - * from lruCache are put into the bucket cache. + * first from the smaller lruCache before looking for the block in the l2Cache. * Metrics are the combined size and hits and misses of both caches. - * */ @InterfaceAudience.Private public class CombinedBlockCache implements ResizableBlockCache, HeapSize { - protected final LruBlockCache lruCache; + protected final LruBlockCache onHeapCache; protected final BlockCache l2Cache; protected final CombinedCacheStats combinedCacheStats; - public CombinedBlockCache(LruBlockCache lruCache, BlockCache l2Cache) { - this.lruCache = lruCache; + public CombinedBlockCache(LruBlockCache onHeapCache, BlockCache l2Cache) { + this.onHeapCache = onHeapCache; this.l2Cache = l2Cache; - this.combinedCacheStats = new CombinedCacheStats(lruCache.getStats(), + this.combinedCacheStats = new CombinedCacheStats(onHeapCache.getStats(), l2Cache.getStats()); } @@ -57,23 +55,22 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { if (l2Cache instanceof HeapSize) { l2size = ((HeapSize) l2Cache).heapSize(); } - return lruCache.heapSize() + l2size; + return onHeapCache.heapSize() + l2size; } @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - final boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA; - if (metaBlock || cacheDataInL1) { - lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); + if (metaBlock) { + onHeapCache.cacheBlock(cacheKey, buf, inMemory); } else { - l2Cache.cacheBlock(cacheKey, buf, inMemory, false); + l2Cache.cacheBlock(cacheKey, buf, inMemory); } } @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { - cacheBlock(cacheKey, buf, false, false); + cacheBlock(cacheKey, buf, false); } @Override @@ -81,19 +78,21 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { boolean repeat, boolean updateCacheMetrics) { // TODO: is there a hole here, or just awkwardness since in the lruCache getBlock // we end up calling l2Cache.getBlock. - return lruCache.containsBlock(cacheKey)? 
- lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): + // We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting + // passed always. + return onHeapCache.containsBlock(cacheKey)? + onHeapCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @Override public boolean evictBlock(BlockCacheKey cacheKey) { - return lruCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey); + return onHeapCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey); } @Override public int evictBlocksByHfileName(String hfileName) { - return lruCache.evictBlocksByHfileName(hfileName) + return onHeapCache.evictBlocksByHfileName(hfileName) + l2Cache.evictBlocksByHfileName(hfileName); } @@ -104,43 +103,43 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public void shutdown() { - lruCache.shutdown(); + onHeapCache.shutdown(); l2Cache.shutdown(); } @Override public long size() { - return lruCache.size() + l2Cache.size(); + return onHeapCache.size() + l2Cache.size(); } @Override public long getMaxSize() { - return lruCache.getMaxSize() + l2Cache.getMaxSize(); + return onHeapCache.getMaxSize() + l2Cache.getMaxSize(); } @Override public long getCurrentDataSize() { - return lruCache.getCurrentDataSize() + l2Cache.getCurrentDataSize(); + return onHeapCache.getCurrentDataSize() + l2Cache.getCurrentDataSize(); } @Override public long getFreeSize() { - return lruCache.getFreeSize() + l2Cache.getFreeSize(); + return onHeapCache.getFreeSize() + l2Cache.getFreeSize(); } @Override public long getCurrentSize() { - return lruCache.getCurrentSize() + l2Cache.getCurrentSize(); + return onHeapCache.getCurrentSize() + l2Cache.getCurrentSize(); } @Override public long getBlockCount() { - return lruCache.getBlockCount() + l2Cache.getBlockCount(); + return onHeapCache.getBlockCount() + l2Cache.getBlockCount(); } @Override public long getDataBlockCount() { - return lruCache.getDataBlockCount() + l2Cache.getDataBlockCount(); + return onHeapCache.getDataBlockCount() + l2Cache.getDataBlockCount(); } public static class CombinedCacheStats extends CacheStats { @@ -363,12 +362,12 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public BlockCache[] getBlockCaches() { - return new BlockCache [] {this.lruCache, this.l2Cache}; + return new BlockCache [] {this.onHeapCache, this.l2Cache}; } @Override public void setMaxSize(long size) { - this.lruCache.setMaxSize(size); + this.onHeapCache.setMaxSize(size); } @Override @@ -379,6 +378,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @VisibleForTesting public int getRefCount(BlockCacheKey cacheKey) { - return ((BucketCache) this.l2Cache).getRefCount(cacheKey); + return (this.l2Cache instanceof BucketCache) + ? 
((BucketCache) this.l2Cache).getRefCount(cacheKey) : 0; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index f216f4219c2..5021b4d1dea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1408,8 +1408,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // Cache the block if (cacheBlock) { - cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock, - cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1()); + cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock, cacheConf.isInMemory()); } return metaBlock; @@ -1495,7 +1494,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) { cacheConf.getBlockCache().cacheBlock(cacheKey, cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked, - cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1()); + cacheConf.isInMemory()); } if (updateCacheMetrics && hfileBlock.getBlockType().isData()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java index 4d7126e8880..823576f3b51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java @@ -19,13 +19,13 @@ package org.apache.hadoop.hbase.io.hfile; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; -@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class InclusiveCombinedBlockCache extends CombinedBlockCache implements BlockCache { +@InterfaceAudience.Private +public class InclusiveCombinedBlockCache extends CombinedBlockCache { public InclusiveCombinedBlockCache(LruBlockCache l1, BlockCache l2) { super(l1,l2); + l1.setVictimCache(l2); } @Override @@ -34,7 +34,7 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache implements B // On all external cache set ups the lru should have the l2 cache set as the victimHandler // Because of that all requests that miss inside of the lru block cache will be // tried in the l2 block cache. - return lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + return onHeapCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } /** @@ -43,16 +43,21 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache implements B * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory. This parameter is only useful for * the L1 lru cache. - * @param cacheDataInL1 This is totally ignored. */ @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - final boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { // This is the inclusive part of the combined block cache. // Every block is placed into both block caches. - lruCache.cacheBlock(cacheKey, buf, inMemory, true); + onHeapCache.cacheBlock(cacheKey, buf, inMemory); // This assumes that insertion into the L2 block cache is either async or very fast. 
- l2Cache.cacheBlock(cacheKey, buf, inMemory, true); + l2Cache.cacheBlock(cacheKey, buf, inMemory); + } + + @Override + public boolean evictBlock(BlockCacheKey cacheKey) { + boolean l1Result = this.onHeapCache.evictBlock(cacheKey); + boolean l2Result = this.l2Cache.evictBlock(cacheKey); + return l1Result || l2Result; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 0fde0a7ee40..3733535df17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -40,7 +40,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.HasThread; @@ -223,7 +222,11 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { /** Whether in-memory hfile's data block has higher priority when evicting */ private boolean forceInMemory; - /** Where to send victims (blocks evicted/missing from the cache) */ + /** + * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an + * external cache as L2. + * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + */ private BlockCache victimHandler = null; /** @@ -360,9 +363,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * @param inMemory if block is in-memory */ @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - final boolean cacheDataInL1) { - + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { if (buf.heapSize() > maxBlockSize) { // If there are a lot of blocks that are too // big this can make the logs way too noisy. @@ -448,7 +449,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * @param buf block buffer */ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { - cacheBlock(cacheKey, buf, false, false); + cacheBlock(cacheKey, buf, false); } /** @@ -499,7 +500,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { if (result instanceof HFileBlock && ((HFileBlock) result).usesSharedMemory()) { result = ((HFileBlock) result).deepClone(); } - cacheBlock(cacheKey, result, /* inMemory = */ false, /* cacheData = */ true); + cacheBlock(cacheKey, result, /* inMemory = */ false); } return result; } @@ -577,14 +578,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { // update the stats counter. 
stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary()); if (victimHandler != null) { - if (victimHandler instanceof BucketCache) { - boolean wait = getCurrentSize() < acceptableSize(); - boolean inMemory = block.getPriority() == BlockPriority.MEMORY; - ((BucketCache) victimHandler).cacheBlockWithWait(block.getCacheKey(), block.getBuffer(), - inMemory, true, wait); - } else { - victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer()); - } + victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer()); } } return block.heapSize(); @@ -1179,10 +1173,6 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { return map; } - BlockCache getVictimHandler() { - return this.victimHandler; - } - @Override @JsonIgnore public BlockCache[] getBlockCaches() { @@ -1190,16 +1180,4 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { return new BlockCache[] {this, this.victimHandler}; return null; } - - @Override - public void returnBlock(BlockCacheKey cacheKey, Cacheable block) { - // There is no SHARED type here in L1. But the block might have been served from the Victim - // handler L2 cache. (when the Combined mode = false). So just try return this block to - // L2 victim handler cache. - // Note : In case of CombinedBlockCache, we will have this victimHandler configured for L1 - // cache. But CombinedBlockCache will only call returnBlock on L2 cache. - if (this.victimHandler != null) { - this.victimHandler.returnBlock(cacheKey, block); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index b0011d799ac..d03f57ab7a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -397,7 +397,7 @@ public class BucketCache implements BlockCache, HeapSize { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { - cacheBlock(cacheKey, buf, false, false); + cacheBlock(cacheKey, buf, false); } /** @@ -405,12 +405,10 @@ public class BucketCache implements BlockCache, HeapSize { * @param cacheKey block's cache key * @param cachedItem block buffer * @param inMemory if block is in-memory - * @param cacheDataInL1 */ @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, - final boolean cacheDataInL1) { - cacheBlockWithWait(cacheKey, cachedItem, inMemory, cacheDataInL1, wait_when_cache); + public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory) { + cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache); } /** @@ -421,23 +419,18 @@ public class BucketCache implements BlockCache, HeapSize { * @param wait if true, blocking wait when queue is full */ public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, - boolean cacheDataInL1, boolean wait) { + boolean wait) { if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem); if (!cacheEnabled) { return; } if (backingMap.containsKey(cacheKey)) { - /* - * Compare already cached block only if lruBlockCache is not used to cache data blocks - */ - if (!cacheDataInL1) { - Cacheable existingBlock = getBlock(cacheKey, false, false, false); - if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) { - throw new RuntimeException("Cached block contents differ, which 
should not have happened." - + "cacheKey:" + cacheKey); - } - } + Cacheable existingBlock = getBlock(cacheKey, false, false, false); + if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) { + throw new RuntimeException("Cached block contents differ, which should not have happened." + + "cacheKey:" + cacheKey); + } String msg = "Caching an already cached block: " + cacheKey; msg += ". This is harmless and can happen in rare cases (see HBASE-8547)"; LOG.warn(msg); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java index 3689643372f..1f2025278d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java @@ -213,7 +213,7 @@ public class MemorySizeUtil { * @return the number of bytes to use for LRU, negative if disabled. * @throws IllegalArgumentException if HFILE_BLOCK_CACHE_SIZE_KEY is > 1.0 */ - public static long getLruCacheSize(final Configuration conf) { + public static long getOnHeapCacheSize(final Configuration conf) { float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); if (cachePercentage <= 0.0001f) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index b64937e6034..cfdb32dd8d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -107,9 +107,9 @@ public class HeapMemoryManager { public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher, Server server, RegionServerAccounting regionServerAccounting) { - ResizableBlockCache l1Cache = CacheConfig.getL1(conf); - if (l1Cache != null) { - return new HeapMemoryManager(l1Cache, memStoreFlusher, server, regionServerAccounting); + ResizableBlockCache lruCache = CacheConfig.getOnHeapCache(conf); + if (lruCache != null) { + return new HeapMemoryManager(lruCache, memStoreFlusher, server, regionServerAccounting); } return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 75d8e56fbaa..f65bb66a9ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -153,8 +153,8 @@ class MetricsRegionServerWrapperImpl private synchronized void initBlockCache() { CacheConfig cacheConfig = this.regionServer.cacheConfig; if (cacheConfig != null) { - l1Stats = cacheConfig.getL1Stats(); - l2Stats = cacheConfig.getL2Stats(); + l1Stats = cacheConfig.getOnHeapCacheStats(); + l2Stats = cacheConfig.getL2CacheStats(); if (this.blockCache == null) { this.blockCache = cacheConfig.getBlockCache(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 3ed2ee354c1..2e2d263ee3a 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1326,10 +1326,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, setBlockCacheEnabled(true). setBlocksize(8 * 1024). setBloomFilterType(BloomType.NONE). - setScope(HConstants.REPLICATION_SCOPE_LOCAL). - // Set cache data blocks in L1 if more than one cache tier deployed; e.g. this will - // be the case if we are using CombinedBlockCache (Bucket Cache). - setCacheDataInL1(true).build(); + setScope(HConstants.REPLICATION_SCOPE_LOCAL).build(); TableDescriptor td = TableDescriptorBuilder.newBuilder(AccessControlLists.ACL_TABLE_NAME). addColumnFamily(cfd).build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 0b926c4f4c0..d7db89bd6b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -155,9 +155,6 @@ public class FSTableDescriptors implements TableDescriptors { .setScope(HConstants.REPLICATION_SCOPE_LOCAL) // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true) .build()) .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY) .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, @@ -168,9 +165,6 @@ public class FSTableDescriptors implements TableDescriptors { .setScope(HConstants.REPLICATION_SCOPE_LOCAL) // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true) .build()) .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_POSITION_FAMILY) .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, @@ -181,9 +175,6 @@ public class FSTableDescriptors implements TableDescriptors { .setScope(HConstants.REPLICATION_SCOPE_LOCAL) // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true) .build()) .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_META_FAMILY) .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, @@ -194,9 +185,6 @@ public class FSTableDescriptors implements TableDescriptors { .setScope(HConstants.REPLICATION_SCOPE_LOCAL) // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true) .build()) .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY) // Ten is arbitrary number. Keep versions to help debugging. 
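The getOnHeapCache/getOnHeapCacheStats/getL2CacheStats renames above change how server-side code reaches the two tiers. A hedged monitoring sketch against the renamed accessors (the CacheConfig names are taken from this patch; the hit/miss getters on CacheStats are assumed from the existing API):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.CacheStats;

    public class CacheStatsProbe {  // hypothetical helper, not part of this patch
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        CacheConfig.instantiateBlockCache(conf);              // builds the on-heap LRU (+ optional L2)
        CacheConfig cacheConf = new CacheConfig(conf);
        CacheStats onHeap = cacheConf.getOnHeapCacheStats();  // LruBlockCache stats; null if cache disabled
        CacheStats l2 = cacheConf.getL2CacheStats();          // BucketCache/external stats; null if no L2
        if (onHeap != null) {
          System.out.println("L1 hits=" + onHeap.getHitCount() + " misses=" + onHeap.getMissCount());
        }
        if (l2 != null) {
          System.out.println("L2 hits=" + l2.getHitCount() + " misses=" + l2.getMissCount());
        }
      }
    }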
@@ -206,9 +194,6 @@ public class FSTableDescriptors implements TableDescriptors { .setScope(HConstants.REPLICATION_SCOPE_LOCAL) // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true) .build()) .addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint", null, Coprocessor.PRIORITY_SYSTEM, null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java index 710d4089bb8..e1ae654b5bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.MemoryUsage; import java.nio.ByteBuffer; -import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -186,7 +185,7 @@ public class TestCacheConfig { Cacheable c = new DataCacheEntry(); // Do asserts on block counting. long initialBlockCount = bc.getBlockCount(); - bc.cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1()); + bc.cacheBlock(bck, c, cc.isInMemory()); assertEquals(doubling? 2: 1, bc.getBlockCount() - initialBlockCount); bc.evictBlock(bck); assertEquals(initialBlockCount, bc.getBlockCount()); @@ -194,7 +193,7 @@ public class TestCacheConfig { // buffers do lazy allocation so sizes are off on first go around. if (sizing) { long originalSize = bc.getCurrentSize(); - bc.cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1()); + bc.cacheBlock(bck, c, cc.isInMemory()); assertTrue(bc.getCurrentSize() > originalSize); bc.evictBlock(bck); long size = bc.getCurrentSize(); @@ -202,19 +201,6 @@ public class TestCacheConfig { } } - /** - * @param cc - * @param filename - * @return - */ - private long cacheDataBlock(final CacheConfig cc, final String filename) { - BlockCacheKey bck = new BlockCacheKey(filename, 0); - Cacheable c = new DataCacheEntry(); - // Do asserts on block counting. - cc.getBlockCache().cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1()); - return cc.getBlockCache().getBlockCount(); - } - @Test public void testDisableCacheDataBlock() throws IOException { Configuration conf = HBaseConfiguration.create(); @@ -324,7 +310,7 @@ public class TestCacheConfig { BlockCache [] bcs = cbc.getBlockCaches(); assertTrue(bcs[0] instanceof LruBlockCache); LruBlockCache lbc = (LruBlockCache)bcs[0]; - assertEquals(MemorySizeUtil.getLruCacheSize(this.conf), lbc.getMaxSize()); + assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize()); assertTrue(bcs[1] instanceof BucketCache); BucketCache bc = (BucketCache)bcs[1]; // getMaxSize comes back in bytes but we specified size in MB @@ -342,19 +328,19 @@ public class TestCacheConfig { // from L1 happens, it does not fail because L2 can't take the eviction because block too big. this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f); MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); - long lruExpectedSize = MemorySizeUtil.getLruCacheSize(this.conf); + long lruExpectedSize = MemorySizeUtil.getOnHeapCacheSize(this.conf); final int bcSize = 100; long bcExpectedSize = 100 * 1024 * 1024; // MB. 
assertTrue(lruExpectedSize < bcExpectedSize); this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize); - this.conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY, false); CacheConfig cc = new CacheConfig(this.conf); basicBlockCacheOps(cc, false, false); - assertTrue(cc.getBlockCache() instanceof LruBlockCache); + assertTrue(cc.getBlockCache() instanceof CombinedBlockCache); // TODO: Assert sizes allocated are right and proportions. - LruBlockCache lbc = (LruBlockCache)cc.getBlockCache(); + CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache(); + LruBlockCache lbc = cbc.onHeapCache; assertEquals(lruExpectedSize, lbc.getMaxSize()); - BlockCache bc = lbc.getVictimHandler(); + BlockCache bc = cbc.l2Cache; // getMaxSize comes back in bytes but we specified size in MB assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize()); // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2. @@ -362,7 +348,7 @@ public class TestCacheConfig { long initialL2BlockCount = bc.getBlockCount(); Cacheable c = new DataCacheEntry(); BlockCacheKey bck = new BlockCacheKey("bck", 0); - lbc.cacheBlock(bck, c, false, false); + lbc.cacheBlock(bck, c, false); assertEquals(initialL1BlockCount + 1, lbc.getBlockCount()); assertEquals(initialL2BlockCount, bc.getBlockCount()); // Force evictions by putting in a block too big. @@ -381,32 +367,6 @@ public class TestCacheConfig { // The eviction thread in lrublockcache needs to run. while (initialL1BlockCount != lbc.getBlockCount()) Threads.sleep(10); assertEquals(initialL1BlockCount, lbc.getBlockCount()); - long count = bc.getBlockCount(); - assertTrue(initialL2BlockCount + 1 <= count); - } - - /** - * Test the cacheDataInL1 flag. When set, data blocks should be cached in the l1 tier, up in - * LruBlockCache when using CombinedBlockCcahe. - */ - @Test - public void testCacheDataInL1() { - this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); - this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100); - CacheConfig cc = new CacheConfig(this.conf); - assertTrue(cc.getBlockCache() instanceof CombinedBlockCache); - CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache(); - // Add a data block. Should go into L2, into the Bucket Cache, not the LruBlockCache. - cacheDataBlock(cc, "1"); - LruBlockCache lrubc = (LruBlockCache)cbc.getBlockCaches()[0]; - assertDataBlockCount(lrubc, 0); - // Enable our test flag. - cc.setCacheDataInL1(true); - cacheDataBlock(cc, "2"); - assertDataBlockCount(lrubc, 1); - cc.setCacheDataInL1(false); - cacheDataBlock(cc, "3"); - assertDataBlockCount(lrubc, 1); } @Test @@ -416,16 +376,9 @@ public class TestCacheConfig { c.set(CacheConfig.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096"); c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024); try { - CacheConfig.getL2(c); + CacheConfig.getBucketCache(c); fail("Should throw IllegalArgumentException when passing illegal value for bucket size"); } catch (IllegalArgumentException e) { } } - - private void assertDataBlockCount(final LruBlockCache bc, final int expected) { - Map blocks = bc.getBlockTypeCountsForTest(); - assertEquals(expected, blocks == null? 0: - blocks.get(BlockType.DATA) == null? 
0: - blocks.get(BlockType.DATA).intValue()); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 47bf5a42777..9535a461c45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -230,7 +230,7 @@ public class TestCacheOnWrite { new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA), cowType.shouldBeCached(BlockType.LEAF_INDEX), cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, - false, false, false); + false, false); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 4eec0bfbab8..af169f50269 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -80,7 +80,7 @@ public class TestLruBlockCache { for (int blockIndex = 0; blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) { CachedItem block = new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement()); boolean inMemory = Math.random() > 0.5; - cache.cacheBlock(block.cacheKey, block, inMemory, false); + cache.cacheBlock(block.cacheKey, block, inMemory); } cache.evictBlocksByHfileName(hfileName); } @@ -350,7 +350,7 @@ public class TestLruBlockCache { cache.getBlock(multiBlocks[i].cacheKey, true, false, true); // Add memory blocks as such - cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true, false); + cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true); expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize(); } @@ -385,7 +385,7 @@ public class TestLruBlockCache { assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); // Insert another memory block - cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true, false); + cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); // Three evictions, three evicted. assertEquals(3, cache.getStats().getEvictionCount()); @@ -423,7 +423,7 @@ public class TestLruBlockCache { assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true)); // Cache a big memory block - cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true, false); + cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true); // Six evictions, twelve evicted (3 new) assertEquals(6, cache.getStats().getEvictionCount()); @@ -478,7 +478,7 @@ public class TestLruBlockCache { assertEquals(expectedCacheSize, cache.heapSize()); // 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1 - cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true, false); + cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true); // Single eviction, one block evicted assertEquals(1, cache.getStats().getEvictionCount()); assertEquals(1, cache.getStats().getEvictedCount()); @@ -486,7 +486,7 @@ public class TestLruBlockCache { assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); // 2. 
Insert another memory block, another single evicted, si:mu:me = 3:4:2 - cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true, false); + cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true); // Two evictions, two evicted. assertEquals(2, cache.getStats().getEvictionCount()); assertEquals(2, cache.getStats().getEvictedCount()); @@ -494,10 +494,10 @@ public class TestLruBlockCache { assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); // 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6 - cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true, false); - cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true, false); - cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true, false); - cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true, false); + cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true); + cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); + cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true); + cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true); // Three evictions, three evicted. assertEquals(6, cache.getStats().getEvictionCount()); assertEquals(6, cache.getStats().getEvictedCount()); @@ -509,9 +509,9 @@ public class TestLruBlockCache { // 4. Insert 3 memory blocks, the remaining 1 single and 2 multi evicted // si:mu:me = 0:0:9 - cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true, false); - cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true, false); - cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true, false); + cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true); + cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true); + cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true); // Three evictions, three evicted. assertEquals(9, cache.getStats().getEvictionCount()); assertEquals(9, cache.getStats().getEvictedCount()); @@ -522,7 +522,7 @@ public class TestLruBlockCache { // 5. Insert one memory block, the oldest memory evicted // si:mu:me = 0:0:9 - cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true, false); + cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true); // one eviction, one evicted. 
assertEquals(10, cache.getStats().getEvictionCount()); assertEquals(10, cache.getStats().getEvictedCount()); @@ -679,7 +679,7 @@ public class TestLruBlockCache { cache.getBlock(multiBlocks[i].cacheKey, true, false, true); // Add memory blocks as such - cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true, false); + cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true); } // Do not expect any evictions yet diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index 1c7f951ebf9..83edf8a56ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -104,12 +104,11 @@ public class TestBucketCache { } @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { if (super.getBlock(cacheKey, true, false, true) != null) { throw new RuntimeException("Cached an already cached block"); } - super.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); + super.cacheBlock(cacheKey, buf, inMemory); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index 76b4faf841f..c71e0c77393 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -645,8 +645,7 @@ public class TestHeapMemoryManager { } @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, - boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { }
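Taken together, the CacheConfig changes reduce the deployment question to "is an L2 configured?": hbase.bucketcache.combinedcache.enabled is no longer honoured (a warning is logged if it is set to false), and any BucketCache or external cache is always composed with the on-heap LRU as a CombinedBlockCache. A hedged end-to-end sketch (the size value is illustrative; an offheap engine also needs enough direct memory):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;

    public class CombinedCacheSetup {  // hypothetical example, not part of this patch
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); // any non-empty ioengine enables the L2 BucketCache
        conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 4096);       // BucketCache size in MB (illustrative)
        BlockCache cache = CacheConfig.instantiateBlockCache(conf);
        // With an L2 present the result is always a CombinedBlockCache:
        // index/bloom blocks in the on-heap LruBlockCache, data blocks in the BucketCache.
        System.out.println(cache instanceof CombinedBlockCache);   // expected: true
      }
    }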
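Separately, the BlockCache interface now gives returnBlock() a no-op default, so only implementations that reference-count shared blocks (the BucketCache path) override it, and MemcachedBlockCache/LruBlockCache drop their overrides. A hedged sketch of the getBlock()/returnBlock() pairing a reader follows (key construction is illustrative):

    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
    import org.apache.hadoop.hbase.io.hfile.Cacheable;

    public class ReadThenReturn {  // hypothetical example, not part of this patch
      static void readBlock(BlockCache cache, String hfileName, long offset) {
        BlockCacheKey key = new BlockCacheKey(hfileName, offset);
        Cacheable block = cache.getBlock(key, /* caching = */ true, /* repeat = */ false,
            /* updateCacheMetrics = */ true);
        if (block == null) {
          return; // miss: the caller reads the block from the HFile instead
        }
        try {
          // ... decode / scan the block ...
        } finally {
          // No-op for caches without reference counting (the new interface default);
          // releases the ref count when the block was served from a BucketCache.
          cache.returnBlock(key, block);
        }
      }
    }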