Revert "HBASE-19357 Bucket cache no longer L2 for LRU cache."
This reverts commit d952c5df9f
.
This commit is contained in:
parent
d952c5df9f
commit
78a6e0532e
|
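For context, the column-family API restored by this revert lets a table owner ask that a family's data blocks stay in the on-heap L1 (LRU) tier even when a BucketCache L2 is deployed. A minimal usage sketch, assuming the pre-HBASE-19357 builder API that the diff below puts back (the table and family names are illustrative):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CacheDataInL1Example {
  public static void main(String[] args) {
    // Keep this family's data blocks in the on-heap L1 tier of a multi-tier
    // block cache deploy (e.g. CombinedBlockCache backed by a BucketCache).
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setBlocksize(8 * 1024)
        .setCacheDataInL1(true)
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .addColumnFamily(family)
        .build();

    System.out.println(table);
  }
}
```

The same flag can also be set from the shell via the CACHE_DATA_IN_L1 attribute, as the ColumnFamilyDescriptorBuilder javadoc in the diff notes.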
@@ -52,7 +52,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
  public static final String CACHE_INDEX_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_INDEX_ON_WRITE;
  public static final String CACHE_BLOOMS_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_BLOOMS_ON_WRITE;
  public static final String EVICT_BLOCKS_ON_CLOSE = ColumnFamilyDescriptorBuilder.EVICT_BLOCKS_ON_CLOSE;
  public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
  public static final String CACHE_DATA_IN_L1 = ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1;
  public static final String PREFETCH_BLOCKS_ON_OPEN = ColumnFamilyDescriptorBuilder.PREFETCH_BLOCKS_ON_OPEN;
  public static final String BLOCKSIZE = ColumnFamilyDescriptorBuilder.BLOCKSIZE;
  public static final String LENGTH = "LENGTH";
@@ -87,7 +87,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED;
  public static final boolean DEFAULT_BLOCKCACHE = ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE;
  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_ON_WRITE;
  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1;
  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_INDEX_ON_WRITE;
  public static final int DEFAULT_BLOCKSIZE = ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE;
  public static final String DEFAULT_BLOOMFILTER = ColumnFamilyDescriptorBuilder.DEFAULT_BLOOMFILTER.name();
@@ -534,16 +534,18 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    return this;
  }

  @Override
  public boolean isCacheDataInL1() {
    return delegatee.isCacheDataInL1();
  }

  /**
   * This is a noop call from HBase 2.0 onwards
   *
   * @param value true if we should cache data blocks in the L1 cache (if block cache deploy
   *          has more than one tier; e.g. we are using CombinedBlockCache).
   * @return this (for chained invocation)
   * @deprecated Since 2.0 and will be removed in 3.0 with out any replacement. Caching data in on
   *             heap Cache, when there are both on heap LRU Cache and Bucket Cache will no longer
   *             be supported from 2.0.
   */
  @Deprecated
  public HColumnDescriptor setCacheDataInL1(boolean value) {
    getDelegateeForModification().setCacheDataInL1(value);
    return this;
  }
|
@@ -202,7 +202,11 @@ public interface ColumnFamilyDescriptor {
   * @return true if we should cache bloomfilter blocks on write
   */
  boolean isCacheBloomsOnWrite();

  /**
   * @return true if we should cache data blocks in the L1 cache (if block cache deploy has more
   *         than one tier; e.g. we are using CombinedBlockCache).
   */
  boolean isCacheDataInL1();
  /**
   * @return true if we should cache data blocks on write
   */
@ -98,6 +98,14 @@ public class ColumnFamilyDescriptorBuilder {
|
|||
@InterfaceAudience.Private
|
||||
public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
|
||||
private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE));
|
||||
/**
|
||||
* Key for cache data into L1 if cache is set up with more than one tier. To
|
||||
* set in the shell, do something like this: <code>hbase(main):003:0> create 't',
|
||||
* {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
|
||||
private static final Bytes CACHE_DATA_IN_L1_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_IN_L1));
|
||||
|
||||
/**
|
||||
* Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM,
|
||||
|
@ -220,6 +228,13 @@ public class ColumnFamilyDescriptorBuilder {
|
|||
*/
|
||||
public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
|
||||
|
||||
/**
|
||||
* Default setting for whether to cache data blocks in L1 tier. Only makes
|
||||
* sense if more than one tier in operations: i.e. if we have an L1 and a L2.
|
||||
* This will be the cases if we are using BucketCache.
|
||||
*/
|
||||
public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
|
||||
|
||||
/**
|
||||
* Default setting for whether to cache index blocks on write if block caching
|
||||
* is enabled.
|
||||
|
@ -295,6 +310,7 @@ public class ColumnFamilyDescriptorBuilder {
|
|||
DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
|
||||
DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
|
||||
DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
|
||||
DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
|
||||
DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
|
||||
DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
|
||||
DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
|
||||
|
@ -428,6 +444,11 @@ public class ColumnFamilyDescriptorBuilder {
|
|||
return this;
|
||||
}
|
||||
|
||||
public ColumnFamilyDescriptorBuilder setCacheDataInL1(boolean value) {
|
||||
desc.setCacheDataInL1(value);
|
||||
return this;
|
||||
}
|
||||
|
||||
public ColumnFamilyDescriptorBuilder setCacheDataOnWrite(boolean value) {
|
||||
desc.setCacheDataOnWrite(value);
|
||||
return this;
|
||||
|
@ -989,6 +1010,21 @@ public class ColumnFamilyDescriptorBuilder {
|
|||
return setValue(CACHE_DATA_ON_WRITE_BYTES, Boolean.toString(value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCacheDataInL1() {
|
||||
return getStringOrDefault(CACHE_DATA_IN_L1_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_IN_L1);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param value true if we should cache data blocks in the L1 cache (if
|
||||
* block cache deploy has more than one tier; e.g. we are using
|
||||
* CombinedBlockCache).
|
||||
* @return this (for chained invocation)
|
||||
*/
|
||||
public ModifyableColumnFamilyDescriptor setCacheDataInL1(boolean value) {
|
||||
return setValue(CACHE_DATA_IN_L1_BYTES, Boolean.toString(value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCacheIndexesOnWrite() {
|
||||
return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_INDEX_ON_WRITE);
|
||||
|
|
|
@ -226,6 +226,9 @@ public class TableDescriptorBuilder {
|
|||
.setInMemory(true)
|
||||
.setBlocksize(8 * 1024)
|
||||
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
|
||||
// Enable cache of data blocks in L1 if more than one caching tier deployed:
|
||||
// e.g. if using CombinedBlockCache (BucketCache).
|
||||
.setCacheDataInL1(true)
|
||||
.build())
|
||||
.build();
|
||||
private final ModifyableTableDescriptor desc;
|
||||
|
|
|
@ -45,7 +45,8 @@ public class TestColumnFamilyDescriptorBuilder {
|
|||
= ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
|
||||
.setInMemory(true)
|
||||
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
|
||||
.setBloomFilterType(BloomType.NONE);
|
||||
.setBloomFilterType(BloomType.NONE)
|
||||
.setCacheDataInL1(true);
|
||||
final int v = 123;
|
||||
builder.setBlocksize(v);
|
||||
builder.setTimeToLive(v);
|
||||
|
|
|
@ -51,6 +51,7 @@ public class TestImmutableHColumnDescriptor {
|
|||
hcd -> hcd.setBlocksize(10),
|
||||
hcd -> hcd.setBloomFilterType(BloomType.NONE),
|
||||
hcd -> hcd.setCacheBloomsOnWrite(false),
|
||||
hcd -> hcd.setCacheDataInL1(true),
|
||||
hcd -> hcd.setCacheDataOnWrite(true),
|
||||
hcd -> hcd.setCacheIndexesOnWrite(true),
|
||||
hcd -> hcd.setCompactionCompressionType(Compression.Algorithm.LZO),
|
||||
|
|
|
@@ -900,6 +900,13 @@ possible configurations would overwhelm and obscure the important.
      See http://hbase.apache.org/book.html#offheap.blockcache for more information.
    </description>
  </property>
  <property>
    <name>hbase.bucketcache.combinedcache.enabled</name>
    <value>true</value>
    <description>Whether or not the bucketcache is used in league with the LRU
      on-heap block cache. In this mode, indices and blooms are kept in the LRU
      blockcache and the data blocks are kept in the bucketcache.</description>
  </property>
  <property>
    <name>hbase.bucketcache.size</name>
    <value></value>
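The two properties above control whether a configured BucketCache runs "in league with" the on-heap LRU cache. A small sketch of how code could read them, assuming a standard Hadoop Configuration (the property names and defaults are taken from the XML above; the class name is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BucketCacheConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Defaults to true: index/bloom blocks stay in the on-heap LRU cache while
    // data blocks go to the BucketCache.
    boolean combined = conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true);

    // Empty by default, meaning no BucketCache is configured at all.
    String bucketCacheSize = conf.get("hbase.bucketcache.size", "");

    System.out.println("combinedcache.enabled=" + combined
        + ", bucketcache.size='" + bucketCacheSize + "'");
  }
}
```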
@ -110,7 +110,10 @@ public class MemcachedBlockCache implements BlockCache {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
|
||||
public void cacheBlock(BlockCacheKey cacheKey,
|
||||
Cacheable buf,
|
||||
boolean inMemory,
|
||||
boolean cacheDataInL1) {
|
||||
cacheBlock(cacheKey, buf);
|
||||
}
|
||||
|
||||
|
@ -285,4 +288,10 @@ public class MemcachedBlockCache implements BlockCache {
|
|||
return MAX_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void returnBlock(BlockCacheKey cacheKey, Cacheable block) {
|
||||
// Not doing reference counting. All blocks here are EXCLUSIVE
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -34,8 +34,11 @@ public interface BlockCache extends Iterable<CachedBlock> {
   * @param cacheKey The block's cache key.
   * @param buf The block contents wrapped in a ByteBuffer.
   * @param inMemory Whether block should be treated as in-memory
   * @param cacheDataInL1 If multi-tier block cache deploy -- i.e. has an L1 and L2 tier -- then
   *          if this flag is true, cache data blocks up in the L1 tier (meta blocks are probably being
   *          cached in L1 already).
   */
  void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory);
  void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, boolean cacheDataInL1);

  /**
   * Add block to cache (defaults to not in-memory).
@ -143,5 +146,5 @@ public interface BlockCache extends Iterable<CachedBlock> {
|
|||
* @param cacheKey the cache key of the block
|
||||
* @param block the hfileblock to be returned
|
||||
*/
|
||||
default void returnBlock(BlockCacheKey cacheKey, Cacheable block){};
|
||||
void returnBlock(BlockCacheKey cacheKey, Cacheable block);
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
|
||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
|
||||
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
|
||||
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
|
||||
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
|
||||
|
@ -108,6 +109,14 @@ public class CacheConfig {
|
|||
public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY =
|
||||
"hbase.bucketcache.persistent.path";
|
||||
|
||||
/**
|
||||
* If the bucket cache is used in league with the lru on-heap block cache (meta blocks such
|
||||
* as indices and blooms are kept in the lru blockcache and the data blocks in the
|
||||
* bucket cache).
|
||||
*/
|
||||
public static final String BUCKET_CACHE_COMBINED_KEY =
|
||||
"hbase.bucketcache.combinedcache.enabled";
|
||||
|
||||
public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads";
|
||||
public static final String BUCKET_CACHE_WRITER_QUEUE_KEY =
|
||||
"hbase.bucketcache.writer.queuelength";
|
||||
|
@ -120,6 +129,7 @@ public class CacheConfig {
|
|||
/**
|
||||
* Defaults for Bucket cache
|
||||
*/
|
||||
public static final boolean DEFAULT_BUCKET_CACHE_COMBINED = true;
|
||||
public static final int DEFAULT_BUCKET_CACHE_WRITER_THREADS = 3;
|
||||
public static final int DEFAULT_BUCKET_CACHE_WRITER_QUEUE = 64;
|
||||
|
||||
|
@ -208,6 +218,13 @@ public class CacheConfig {
|
|||
/** Whether data blocks should be prefetched into the cache */
|
||||
private final boolean prefetchOnOpen;
|
||||
|
||||
/**
|
||||
* If true and if more than one tier in this cache deploy -- e.g. CombinedBlockCache has an L1
|
||||
* and an L2 tier -- then cache data blocks up in the L1 tier (The meta blocks are likely being
|
||||
* cached up in L1 already. At least this is the case if CombinedBlockCache).
|
||||
*/
|
||||
private boolean cacheDataInL1;
|
||||
|
||||
private final boolean dropBehindCompaction;
|
||||
|
||||
/**
|
||||
|
@ -234,6 +251,8 @@ public class CacheConfig {
|
|||
conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
|
||||
conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
|
||||
DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
|
||||
conf.getBoolean(ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1,
|
||||
ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
|
||||
conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
|
||||
);
|
||||
LOG.info("Created cacheConfig for " + family.getNameAsString() + ": " + this);
|
||||
|
@ -257,6 +276,8 @@ public class CacheConfig {
|
|||
conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
|
||||
conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
|
||||
conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN),
|
||||
conf.getBoolean(ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1,
|
||||
ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1),
|
||||
conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
|
||||
);
|
||||
LOG.info("Created cacheConfig: " + this);
|
||||
|
@ -284,7 +305,7 @@ public class CacheConfig {
|
|||
final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite,
|
||||
final boolean cacheBloomsOnWrite, final boolean evictOnClose,
|
||||
final boolean cacheDataCompressed, final boolean prefetchOnOpen,
|
||||
final boolean dropBehindCompaction) {
|
||||
final boolean cacheDataInL1, final boolean dropBehindCompaction) {
|
||||
this.blockCache = blockCache;
|
||||
this.cacheDataOnRead = cacheDataOnRead;
|
||||
this.inMemory = inMemory;
|
||||
|
@ -294,6 +315,7 @@ public class CacheConfig {
|
|||
this.evictOnClose = evictOnClose;
|
||||
this.cacheDataCompressed = cacheDataCompressed;
|
||||
this.prefetchOnOpen = prefetchOnOpen;
|
||||
this.cacheDataInL1 = cacheDataInL1;
|
||||
this.dropBehindCompaction = dropBehindCompaction;
|
||||
}
|
||||
|
||||
|
@ -306,11 +328,12 @@ public class CacheConfig {
|
|||
cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite,
|
||||
cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose,
|
||||
cacheConf.cacheDataCompressed, cacheConf.prefetchOnOpen,
|
||||
cacheConf.dropBehindCompaction);
|
||||
cacheConf.cacheDataInL1, cacheConf.dropBehindCompaction);
|
||||
}
|
||||
|
||||
private CacheConfig() {
|
||||
this(null, false, false, false, false, false, false, false, false, false);
|
||||
this(null, false, false, false, false, false,
|
||||
false, false, false, false, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -363,6 +386,13 @@ public class CacheConfig {
|
|||
return isBlockCacheEnabled() && this.inMemory;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return True if cache data blocks in L1 tier (if more than one tier in block cache deploy).
|
||||
*/
|
||||
public boolean isCacheDataInL1() {
|
||||
return isBlockCacheEnabled() && this.cacheDataInL1;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if data blocks should be written to the cache when an HFile is
|
||||
* written, false if not
|
||||
|
@ -381,6 +411,16 @@ public class CacheConfig {
|
|||
this.cacheDataOnWrite = cacheDataOnWrite;
|
||||
}
|
||||
|
||||
/**
|
||||
* Only used for testing.
|
||||
* @param cacheDataInL1 Whether to cache data blocks up in l1 (if a multi-tier cache
|
||||
* implementation).
|
||||
*/
|
||||
@VisibleForTesting
|
||||
public void setCacheDataInL1(boolean cacheDataInL1) {
|
||||
this.cacheDataInL1 = cacheDataInL1;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if index blocks should be written to the cache when an HFile
|
||||
* is written, false if not
|
||||
|
@ -507,8 +547,8 @@ public class CacheConfig {
|
|||
// Clear this if in tests you'd make more than one block cache instance.
|
||||
@VisibleForTesting
|
||||
static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE;
|
||||
private static LruBlockCache ONHEAP_CACHE_INSTANCE = null;
|
||||
private static BlockCache L2_CACHE_INSTANCE = null;// Can be BucketCache or External cache.
|
||||
private static LruBlockCache GLOBAL_L1_CACHE_INSTANCE = null;
|
||||
private static BlockCache GLOBAL_L2_CACHE_INSTANCE = null;
|
||||
|
||||
/** Boolean whether we have disabled the block cache entirely. */
|
||||
@VisibleForTesting
|
||||
|
@ -518,20 +558,20 @@ public class CacheConfig {
|
|||
* @param c Configuration to use.
|
||||
* @return An L1 instance. Currently an instance of LruBlockCache.
|
||||
*/
|
||||
public static LruBlockCache getOnHeapCache(final Configuration c) {
|
||||
return getOnHeapCacheInternal(c);
|
||||
public static LruBlockCache getL1(final Configuration c) {
|
||||
return getL1Internal(c);
|
||||
}
|
||||
|
||||
public CacheStats getOnHeapCacheStats() {
|
||||
if (ONHEAP_CACHE_INSTANCE != null) {
|
||||
return ONHEAP_CACHE_INSTANCE.getStats();
|
||||
public CacheStats getL1Stats() {
|
||||
if (GLOBAL_L1_CACHE_INSTANCE != null) {
|
||||
return GLOBAL_L1_CACHE_INSTANCE.getStats();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public CacheStats getL2CacheStats() {
|
||||
if (L2_CACHE_INSTANCE != null) {
|
||||
return L2_CACHE_INSTANCE.getStats();
|
||||
public CacheStats getL2Stats() {
|
||||
if (GLOBAL_L2_CACHE_INSTANCE != null) {
|
||||
return GLOBAL_L2_CACHE_INSTANCE.getStats();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
@ -540,26 +580,43 @@ public class CacheConfig {
|
|||
* @param c Configuration to use.
|
||||
* @return An L1 instance. Currently an instance of LruBlockCache.
|
||||
*/
|
||||
private synchronized static LruBlockCache getOnHeapCacheInternal(final Configuration c) {
|
||||
if (ONHEAP_CACHE_INSTANCE != null) {
|
||||
return ONHEAP_CACHE_INSTANCE;
|
||||
}
|
||||
final long cacheSize = MemorySizeUtil.getOnHeapCacheSize(c);
|
||||
if (cacheSize < 0) {
|
||||
private synchronized static LruBlockCache getL1Internal(final Configuration c) {
|
||||
if (GLOBAL_L1_CACHE_INSTANCE != null) return GLOBAL_L1_CACHE_INSTANCE;
|
||||
final long lruCacheSize = MemorySizeUtil.getLruCacheSize(c);
|
||||
if (lruCacheSize < 0) {
|
||||
blockCacheDisabled = true;
|
||||
}
|
||||
if (blockCacheDisabled) return null;
|
||||
int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
|
||||
LOG.info("Allocating On heap LruBlockCache size=" +
|
||||
StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
|
||||
ONHEAP_CACHE_INSTANCE = new LruBlockCache(cacheSize, blockSize, true, c);
|
||||
return ONHEAP_CACHE_INSTANCE;
|
||||
LOG.info("Allocating LruBlockCache size=" +
|
||||
StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
|
||||
GLOBAL_L1_CACHE_INSTANCE = new LruBlockCache(lruCacheSize, blockSize, true, c);
|
||||
return GLOBAL_L1_CACHE_INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param c Configuration to use.
|
||||
* @return Returns L2 block cache instance (for now it is BucketCache BlockCache all the time)
|
||||
* or null if not supposed to be a L2.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static BlockCache getL2(final Configuration c) {
|
||||
final boolean useExternal = c.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Trying to use " + (useExternal?" External":" Internal") + " l2 cache");
|
||||
}
|
||||
|
||||
// If we want to use an external block cache then create that.
|
||||
if (useExternal) {
|
||||
GLOBAL_L2_CACHE_INSTANCE = getExternalBlockcache(c);
|
||||
} else {
|
||||
// otherwise use the bucket cache.
|
||||
GLOBAL_L2_CACHE_INSTANCE = getBucketCache(c);
|
||||
}
|
||||
return GLOBAL_L2_CACHE_INSTANCE;
|
||||
}
|
||||
|
||||
private static BlockCache getExternalBlockcache(Configuration c) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Trying to use External l2 cache");
|
||||
}
|
||||
Class klass = null;
|
||||
|
||||
// Get the class, from the config. s
|
||||
|
@ -585,8 +642,7 @@ public class CacheConfig {
|
|||
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
static BucketCache getBucketCache(Configuration c) {
|
||||
private static BlockCache getBucketCache(Configuration c) {
|
||||
// Check for L2. ioengine name must be non-null.
|
||||
String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null);
|
||||
if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null;
|
||||
|
@@ -649,25 +705,30 @@ public class CacheConfig {
  public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
    if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
    if (blockCacheDisabled) return null;
    LruBlockCache onHeapCache = getOnHeapCacheInternal(conf);
    // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the
    // call.
    LruBlockCache l1 = getL1Internal(conf);
    // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the call.
    if (blockCacheDisabled) return null;
    boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
    if (useExternal) {
      L2_CACHE_INSTANCE = getExternalBlockcache(conf);
      GLOBAL_BLOCK_CACHE_INSTANCE = L2_CACHE_INSTANCE == null ? onHeapCache
          : new InclusiveCombinedBlockCache(onHeapCache, L2_CACHE_INSTANCE);
    BlockCache l2 = getL2(conf);
    if (l2 == null) {
      GLOBAL_BLOCK_CACHE_INSTANCE = l1;
    } else {
      // otherwise use the bucket cache.
      L2_CACHE_INSTANCE = getBucketCache(conf);
      if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) {
        // Non combined mode is off from 2.0
        LOG.warn(
            "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available");
      boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
      boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
          DEFAULT_BUCKET_CACHE_COMBINED);
      if (useExternal) {
        GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2);
      } else {
        if (combinedWithLru) {
          GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2);
        } else {
          // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler
          // mechanism. It is a little ugly but works according to the following: when the
          // background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get
          // a block from the L1 cache, if not in L1, we will search L2.
          GLOBAL_BLOCK_CACHE_INSTANCE = l1;
        }
      }
      GLOBAL_BLOCK_CACHE_INSTANCE = L2_CACHE_INSTANCE == null ? onHeapCache
          : new CombinedBlockCache(onHeapCache, L2_CACHE_INSTANCE);
      l1.setVictimCache(l2);
    }
    return GLOBAL_BLOCK_CACHE_INSTANCE;
  }
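The instantiateBlockCache hunk above restores the pre-revert wiring choices: no L2 means the LRU cache alone, an external L2 is wrapped in an InclusiveCombinedBlockCache, and a BucketCache L2 is either combined with the LRU cache or attached only as its victim handler when hbase.bucketcache.combinedcache.enabled is false. A standalone sketch of that decision, using illustrative names rather than HBase classes:

```java
public class BlockCacheWiringSketch {

  // Illustrative summary of the wirings restored by this revert.
  enum Wiring { LRU_ONLY, INCLUSIVE_COMBINED, COMBINED, LRU_WITH_VICTIM_HANDLER }

  static Wiring choose(boolean hasL2, boolean useExternal, boolean combinedWithLru) {
    if (!hasL2) {
      return Wiring.LRU_ONLY;            // no BucketCache or external cache configured
    }
    if (useExternal) {
      return Wiring.INCLUSIVE_COMBINED;  // external L2: blocks cached in both tiers
    }
    if (combinedWithLru) {
      return Wiring.COMBINED;            // meta blocks in LRU, data blocks in BucketCache
    }
    // Not "combined": L1 evictions spill to L2 via the LruBlockCache victim handler,
    // and L1 misses fall through to L2 on read.
    return Wiring.LRU_WITH_VICTIM_HANDLER;
  }

  public static void main(String[] args) {
    System.out.println(choose(true, false, true));   // COMBINED
    System.out.println(choose(true, false, false));  // LRU_WITH_VICTIM_HANDLER
  }
}
```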
@ -675,8 +736,8 @@ public class CacheConfig {
|
|||
// Supposed to use only from tests. Some tests want to reinit the Global block cache instance
|
||||
@VisibleForTesting
|
||||
static synchronized void clearGlobalInstances() {
|
||||
ONHEAP_CACHE_INSTANCE = null;
|
||||
L2_CACHE_INSTANCE = null;
|
||||
GLOBAL_L1_CACHE_INSTANCE = null;
|
||||
GLOBAL_L2_CACHE_INSTANCE = null;
|
||||
GLOBAL_BLOCK_CACHE_INSTANCE = null;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,21 +31,23 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe
|
|||
/**
|
||||
* CombinedBlockCache is an abstraction layer that combines
|
||||
* {@link LruBlockCache} and {@link BucketCache}. The smaller lruCache is used
|
||||
* to cache bloom blocks and index blocks. The larger Cache is used to
|
||||
* to cache bloom blocks and index blocks. The larger l2Cache is used to
|
||||
* cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads
|
||||
* first from the smaller lruCache before looking for the block in the l2Cache.
|
||||
* first from the smaller lruCache before looking for the block in the l2Cache. Blocks evicted
|
||||
* from lruCache are put into the bucket cache.
|
||||
* Metrics are the combined size and hits and misses of both caches.
|
||||
*
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
|
||||
protected final LruBlockCache onHeapCache;
|
||||
protected final LruBlockCache lruCache;
|
||||
protected final BlockCache l2Cache;
|
||||
protected final CombinedCacheStats combinedCacheStats;
|
||||
|
||||
public CombinedBlockCache(LruBlockCache onHeapCache, BlockCache l2Cache) {
|
||||
this.onHeapCache = onHeapCache;
|
||||
public CombinedBlockCache(LruBlockCache lruCache, BlockCache l2Cache) {
|
||||
this.lruCache = lruCache;
|
||||
this.l2Cache = l2Cache;
|
||||
this.combinedCacheStats = new CombinedCacheStats(onHeapCache.getStats(),
|
||||
this.combinedCacheStats = new CombinedCacheStats(lruCache.getStats(),
|
||||
l2Cache.getStats());
|
||||
}
|
||||
|
||||
|
@ -55,22 +57,23 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
|
|||
if (l2Cache instanceof HeapSize) {
|
||||
l2size = ((HeapSize) l2Cache).heapSize();
|
||||
}
|
||||
return onHeapCache.heapSize() + l2size;
|
||||
return lruCache.heapSize() + l2size;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
|
||||
final boolean cacheDataInL1) {
|
||||
boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA;
|
||||
if (metaBlock) {
|
||||
onHeapCache.cacheBlock(cacheKey, buf, inMemory);
|
||||
if (metaBlock || cacheDataInL1) {
|
||||
lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
|
||||
} else {
|
||||
l2Cache.cacheBlock(cacheKey, buf, inMemory);
|
||||
l2Cache.cacheBlock(cacheKey, buf, inMemory, false);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
|
||||
cacheBlock(cacheKey, buf, false);
|
||||
cacheBlock(cacheKey, buf, false, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -78,21 +81,19 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
|
|||
boolean repeat, boolean updateCacheMetrics) {
|
||||
// TODO: is there a hole here, or just awkwardness since in the lruCache getBlock
|
||||
// we end up calling l2Cache.getBlock.
|
||||
// We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting
|
||||
// passed always.
|
||||
return onHeapCache.containsBlock(cacheKey)?
|
||||
onHeapCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics):
|
||||
return lruCache.containsBlock(cacheKey)?
|
||||
lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics):
|
||||
l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean evictBlock(BlockCacheKey cacheKey) {
|
||||
return onHeapCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey);
|
||||
return lruCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int evictBlocksByHfileName(String hfileName) {
|
||||
return onHeapCache.evictBlocksByHfileName(hfileName)
|
||||
return lruCache.evictBlocksByHfileName(hfileName)
|
||||
+ l2Cache.evictBlocksByHfileName(hfileName);
|
||||
}
|
||||
|
||||
|
@ -103,43 +104,43 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
|
|||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
onHeapCache.shutdown();
|
||||
lruCache.shutdown();
|
||||
l2Cache.shutdown();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long size() {
|
||||
return onHeapCache.size() + l2Cache.size();
|
||||
return lruCache.size() + l2Cache.size();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getMaxSize() {
|
||||
return onHeapCache.getMaxSize() + l2Cache.getMaxSize();
|
||||
return lruCache.getMaxSize() + l2Cache.getMaxSize();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getCurrentDataSize() {
|
||||
return onHeapCache.getCurrentDataSize() + l2Cache.getCurrentDataSize();
|
||||
return lruCache.getCurrentDataSize() + l2Cache.getCurrentDataSize();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFreeSize() {
|
||||
return onHeapCache.getFreeSize() + l2Cache.getFreeSize();
|
||||
return lruCache.getFreeSize() + l2Cache.getFreeSize();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getCurrentSize() {
|
||||
return onHeapCache.getCurrentSize() + l2Cache.getCurrentSize();
|
||||
return lruCache.getCurrentSize() + l2Cache.getCurrentSize();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getBlockCount() {
|
||||
return onHeapCache.getBlockCount() + l2Cache.getBlockCount();
|
||||
return lruCache.getBlockCount() + l2Cache.getBlockCount();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getDataBlockCount() {
|
||||
return onHeapCache.getDataBlockCount() + l2Cache.getDataBlockCount();
|
||||
return lruCache.getDataBlockCount() + l2Cache.getDataBlockCount();
|
||||
}
|
||||
|
||||
public static class CombinedCacheStats extends CacheStats {
|
||||
|
@ -362,12 +363,12 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
|
|||
|
||||
@Override
|
||||
public BlockCache[] getBlockCaches() {
|
||||
return new BlockCache [] {this.onHeapCache, this.l2Cache};
|
||||
return new BlockCache [] {this.lruCache, this.l2Cache};
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setMaxSize(long size) {
|
||||
this.onHeapCache.setMaxSize(size);
|
||||
this.lruCache.setMaxSize(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -378,7 +379,6 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
|
|||
|
||||
@VisibleForTesting
|
||||
public int getRefCount(BlockCacheKey cacheKey) {
|
||||
return (this.l2Cache instanceof BucketCache)
|
||||
? ((BucketCache) this.l2Cache).getRefCount(cacheKey) : 0;
|
||||
return ((BucketCache) this.l2Cache).getRefCount(cacheKey);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1408,7 +1408,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
|
|||
|
||||
// Cache the block
|
||||
if (cacheBlock) {
|
||||
cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock, cacheConf.isInMemory());
|
||||
cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock,
|
||||
cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1());
|
||||
}
|
||||
|
||||
return metaBlock;
|
||||
|
@ -1494,7 +1495,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
|
|||
if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) {
|
||||
cacheConf.getBlockCache().cacheBlock(cacheKey,
|
||||
cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked,
|
||||
cacheConf.isInMemory());
|
||||
cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1());
|
||||
}
|
||||
|
||||
if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
|
||||
|
|
|
@@ -19,13 +19,13 @@

package org.apache.hadoop.hbase.io.hfile;

import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.yetus.audience.InterfaceAudience;

@InterfaceAudience.Private
public class InclusiveCombinedBlockCache extends CombinedBlockCache {
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class InclusiveCombinedBlockCache extends CombinedBlockCache implements BlockCache {
  public InclusiveCombinedBlockCache(LruBlockCache l1, BlockCache l2) {
    super(l1,l2);
    l1.setVictimCache(l2);
  }

  @Override
@@ -34,7 +34,7 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache {
    // On all external cache set ups the lru should have the l2 cache set as the victimHandler
    // Because of that all requests that miss inside of the lru block cache will be
    // tried in the l2 block cache.
    return onHeapCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
    return lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
  }

  /**
@@ -43,21 +43,16 @@ public class InclusiveCombinedBlockCache extends CombinedBlockCache {
   * @param buf The block contents wrapped in a ByteBuffer.
   * @param inMemory Whether block should be treated as in-memory. This parameter is only useful for
   *          the L1 lru cache.
   * @param cacheDataInL1 This is totally ignored.
   */
  @Override
  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
      final boolean cacheDataInL1) {
    // This is the inclusive part of the combined block cache.
    // Every block is placed into both block caches.
    onHeapCache.cacheBlock(cacheKey, buf, inMemory);
    lruCache.cacheBlock(cacheKey, buf, inMemory, true);

    // This assumes that insertion into the L2 block cache is either async or very fast.
    l2Cache.cacheBlock(cacheKey, buf, inMemory);
  }

  @Override
  public boolean evictBlock(BlockCacheKey cacheKey) {
    boolean l1Result = this.onHeapCache.evictBlock(cacheKey);
    boolean l2Result = this.l2Cache.evictBlock(cacheKey);
    return l1Result || l2Result;
    l2Cache.cacheBlock(cacheKey, buf, inMemory, true);
  }
}
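The CombinedBlockCache and InclusiveCombinedBlockCache changes above differ mainly in how cacheBlock routes a block once the cacheDataInL1 flag is back. A standalone sketch of the restored routing rules (plain parameters stand in for the HBase block types; these helpers are illustrative, not HBase API):

```java
public class CacheRoutingSketch {

  /** Where CombinedBlockCache sends a block: meta blocks and L1-flagged data go on heap. */
  static String combinedRoute(boolean isDataBlock, boolean cacheDataInL1) {
    boolean metaBlock = !isDataBlock;
    return (metaBlock || cacheDataInL1) ? "lruCache (L1)" : "l2Cache (BucketCache)";
  }

  /** InclusiveCombinedBlockCache ignores the flag and caches every block in both tiers. */
  static String inclusiveRoute(boolean isDataBlock, boolean cacheDataInL1) {
    return "lruCache (L1) and l2Cache";
  }

  public static void main(String[] args) {
    System.out.println(combinedRoute(true, false));  // data block -> l2Cache (BucketCache)
    System.out.println(combinedRoute(true, true));   // data block flagged for L1 -> lruCache
    System.out.println(combinedRoute(false, false)); // index/bloom block -> lruCache
    System.out.println(inclusiveRoute(true, false)); // always both tiers
  }
}
```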
@ -40,6 +40,7 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.io.HeapSize;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.ClassSize;
|
||||
import org.apache.hadoop.hbase.util.HasThread;
|
||||
|
@ -222,11 +223,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
|
|||
/** Whether in-memory hfile's data block has higher priority when evicting */
|
||||
private boolean forceInMemory;
|
||||
|
||||
/**
|
||||
* Where to send victims (blocks evicted/missing from the cache). This is used only when we use an
|
||||
* external cache as L2.
|
||||
* Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache
|
||||
*/
|
||||
/** Where to send victims (blocks evicted/missing from the cache) */
|
||||
private BlockCache victimHandler = null;
|
||||
|
||||
/**
|
||||
|
@ -363,7 +360,9 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
|
|||
* @param inMemory if block is in-memory
|
||||
*/
|
||||
@Override
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
|
||||
final boolean cacheDataInL1) {
|
||||
|
||||
if (buf.heapSize() > maxBlockSize) {
|
||||
// If there are a lot of blocks that are too
|
||||
// big this can make the logs way too noisy.
|
||||
|
@ -449,7 +448,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
|
|||
* @param buf block buffer
|
||||
*/
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
|
||||
cacheBlock(cacheKey, buf, false);
|
||||
cacheBlock(cacheKey, buf, false, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -500,7 +499,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
|
|||
if (result instanceof HFileBlock && ((HFileBlock) result).usesSharedMemory()) {
|
||||
result = ((HFileBlock) result).deepClone();
|
||||
}
|
||||
cacheBlock(cacheKey, result, /* inMemory = */ false);
|
||||
cacheBlock(cacheKey, result, /* inMemory = */ false, /* cacheData = */ true);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
@@ -578,7 +577,14 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
      // update the stats counter.
      stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary());
      if (victimHandler != null) {
        victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer());
        if (victimHandler instanceof BucketCache) {
          boolean wait = getCurrentSize() < acceptableSize();
          boolean inMemory = block.getPriority() == BlockPriority.MEMORY;
          ((BucketCache) victimHandler).cacheBlockWithWait(block.getCacheKey(), block.getBuffer(),
              inMemory, true, wait);
        } else {
          victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer());
        }
      }
    }
    return block.heapSize();
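The eviction hunk above restores BucketCache-aware spilling from the LRU cache: an evicted block keeps its in-memory priority, and the eviction thread only waits on the BucketCache write queue while the LRU cache is still under its acceptable size. A rough standalone sketch of those two decisions (helper names are illustrative):

```java
public class EvictionSpillSketch {

  enum BlockPriority { SINGLE, MULTI, MEMORY }

  /** Evicted in-memory blocks keep their in-memory flag when spilled to the BucketCache. */
  static boolean spillAsInMemory(BlockPriority priority) {
    return priority == BlockPriority.MEMORY;
  }

  /** Only block on the BucketCache write queue while the LRU cache is under its watermark. */
  static boolean waitForL2(long currentSize, long acceptableSize) {
    return currentSize < acceptableSize;
  }

  public static void main(String[] args) {
    System.out.println(spillAsInMemory(BlockPriority.MEMORY)); // true
    System.out.println(waitForL2(80, 100));                    // true: wait on the queue
    System.out.println(waitForL2(120, 100));                   // false: do not wait
  }
}
```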
@ -1173,6 +1179,10 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
|
|||
return map;
|
||||
}
|
||||
|
||||
BlockCache getVictimHandler() {
|
||||
return this.victimHandler;
|
||||
}
|
||||
|
||||
@Override
|
||||
@JsonIgnore
|
||||
public BlockCache[] getBlockCaches() {
|
||||
|
@ -1180,4 +1190,16 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
|
|||
return new BlockCache[] {this, this.victimHandler};
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void returnBlock(BlockCacheKey cacheKey, Cacheable block) {
|
||||
// There is no SHARED type here in L1. But the block might have been served from the Victim
|
||||
// handler L2 cache. (when the Combined mode = false). So just try return this block to
|
||||
// L2 victim handler cache.
|
||||
// Note : In case of CombinedBlockCache, we will have this victimHandler configured for L1
|
||||
// cache. But CombinedBlockCache will only call returnBlock on L2 cache.
|
||||
if (this.victimHandler != null) {
|
||||
this.victimHandler.returnBlock(cacheKey, block);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -397,7 +397,7 @@ public class BucketCache implements BlockCache, HeapSize {
|
|||
*/
|
||||
@Override
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
|
||||
cacheBlock(cacheKey, buf, false);
|
||||
cacheBlock(cacheKey, buf, false, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -405,10 +405,12 @@ public class BucketCache implements BlockCache, HeapSize {
|
|||
* @param cacheKey block's cache key
|
||||
* @param cachedItem block buffer
|
||||
* @param inMemory if block is in-memory
|
||||
* @param cacheDataInL1
|
||||
*/
|
||||
@Override
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory) {
|
||||
cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache);
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
|
||||
final boolean cacheDataInL1) {
|
||||
cacheBlockWithWait(cacheKey, cachedItem, inMemory, cacheDataInL1, wait_when_cache);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -419,18 +421,23 @@ public class BucketCache implements BlockCache, HeapSize {
|
|||
* @param wait if true, blocking wait when queue is full
|
||||
*/
|
||||
public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
|
||||
boolean wait) {
|
||||
boolean cacheDataInL1, boolean wait) {
|
||||
if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
|
||||
if (!cacheEnabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (backingMap.containsKey(cacheKey)) {
|
||||
Cacheable existingBlock = getBlock(cacheKey, false, false, false);
|
||||
if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
|
||||
throw new RuntimeException("Cached block contents differ, which should not have happened."
|
||||
+ "cacheKey:" + cacheKey);
|
||||
}
|
||||
/*
|
||||
* Compare already cached block only if lruBlockCache is not used to cache data blocks
|
||||
*/
|
||||
if (!cacheDataInL1) {
|
||||
Cacheable existingBlock = getBlock(cacheKey, false, false, false);
|
||||
if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
|
||||
throw new RuntimeException("Cached block contents differ, which should not have happened."
|
||||
+ "cacheKey:" + cacheKey);
|
||||
}
|
||||
}
|
||||
String msg = "Caching an already cached block: " + cacheKey;
|
||||
msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
|
||||
LOG.warn(msg);
|
||||
|
|
|
@ -213,7 +213,7 @@ public class MemorySizeUtil {
|
|||
* @return the number of bytes to use for LRU, negative if disabled.
|
||||
* @throws IllegalArgumentException if HFILE_BLOCK_CACHE_SIZE_KEY is > 1.0
|
||||
*/
|
||||
public static long getOnHeapCacheSize(final Configuration conf) {
|
||||
public static long getLruCacheSize(final Configuration conf) {
|
||||
float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
|
||||
HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
|
||||
if (cachePercentage <= 0.0001f) {
|
||||
|
|
|
@ -107,9 +107,9 @@ public class HeapMemoryManager {
|
|||
|
||||
public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
|
||||
Server server, RegionServerAccounting regionServerAccounting) {
|
||||
ResizableBlockCache lruCache = CacheConfig.getOnHeapCache(conf);
|
||||
if (lruCache != null) {
|
||||
return new HeapMemoryManager(lruCache, memStoreFlusher, server, regionServerAccounting);
|
||||
ResizableBlockCache l1Cache = CacheConfig.getL1(conf);
|
||||
if (l1Cache != null) {
|
||||
return new HeapMemoryManager(l1Cache, memStoreFlusher, server, regionServerAccounting);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
|
|
@ -153,8 +153,8 @@ class MetricsRegionServerWrapperImpl
|
|||
private synchronized void initBlockCache() {
|
||||
CacheConfig cacheConfig = this.regionServer.cacheConfig;
|
||||
if (cacheConfig != null) {
|
||||
l1Stats = cacheConfig.getOnHeapCacheStats();
|
||||
l2Stats = cacheConfig.getL2CacheStats();
|
||||
l1Stats = cacheConfig.getL1Stats();
|
||||
l2Stats = cacheConfig.getL2Stats();
|
||||
if (this.blockCache == null) {
|
||||
this.blockCache = cacheConfig.getBlockCache();
|
||||
}
|
||||
|
|
|
@ -1326,7 +1326,10 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
|
|||
setBlockCacheEnabled(true).
|
||||
setBlocksize(8 * 1024).
|
||||
setBloomFilterType(BloomType.NONE).
|
||||
setScope(HConstants.REPLICATION_SCOPE_LOCAL).build();
|
||||
setScope(HConstants.REPLICATION_SCOPE_LOCAL).
|
||||
// Set cache data blocks in L1 if more than one cache tier deployed; e.g. this will
|
||||
// be the case if we are using CombinedBlockCache (Bucket Cache).
|
||||
setCacheDataInL1(true).build();
|
||||
TableDescriptor td =
|
||||
TableDescriptorBuilder.newBuilder(AccessControlLists.ACL_TABLE_NAME).
|
||||
addColumnFamily(cfd).build();
|
||||
|
|
|
@ -155,6 +155,9 @@ public class FSTableDescriptors implements TableDescriptors {
|
|||
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
|
||||
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
|
||||
.setBloomFilterType(BloomType.NONE)
|
||||
// Enable cache of data blocks in L1 if more than one caching tier deployed:
|
||||
// e.g. if using CombinedBlockCache (BucketCache).
|
||||
.setCacheDataInL1(true)
|
||||
.build())
|
||||
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY)
|
||||
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
|
||||
|
@ -165,6 +168,9 @@ public class FSTableDescriptors implements TableDescriptors {
|
|||
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
|
||||
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
|
||||
.setBloomFilterType(BloomType.NONE)
|
||||
// Enable cache of data blocks in L1 if more than one caching tier deployed:
|
||||
// e.g. if using CombinedBlockCache (BucketCache).
|
||||
.setCacheDataInL1(true)
|
||||
.build())
|
||||
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_POSITION_FAMILY)
|
||||
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
|
||||
|
@ -175,6 +181,9 @@ public class FSTableDescriptors implements TableDescriptors {
|
|||
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
|
||||
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
|
||||
.setBloomFilterType(BloomType.NONE)
|
||||
// Enable cache of data blocks in L1 if more than one caching tier deployed:
|
||||
// e.g. if using CombinedBlockCache (BucketCache).
|
||||
.setCacheDataInL1(true)
|
||||
.build())
|
||||
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_META_FAMILY)
|
||||
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
|
||||
|
@ -185,6 +194,9 @@ public class FSTableDescriptors implements TableDescriptors {
|
|||
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
|
||||
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
|
||||
.setBloomFilterType(BloomType.NONE)
|
||||
// Enable cache of data blocks in L1 if more than one caching tier deployed:
|
||||
// e.g. if using CombinedBlockCache (BucketCache).
|
||||
.setCacheDataInL1(true)
|
||||
.build())
|
||||
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY)
|
||||
// Ten is arbitrary number. Keep versions to help debugging.
|
||||
|
@ -194,6 +206,9 @@ public class FSTableDescriptors implements TableDescriptors {
|
|||
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
|
||||
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
|
||||
.setBloomFilterType(BloomType.NONE)
|
||||
// Enable cache of data blocks in L1 if more than one caching tier deployed:
|
||||
// e.g. if using CombinedBlockCache (BucketCache).
|
||||
.setCacheDataInL1(true)
|
||||
.build())
|
||||
.addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
|
||||
null, Coprocessor.PRIORITY_SYSTEM, null);
|
||||
|
|
|
@ -27,6 +27,7 @@ import java.io.IOException;
|
|||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.MemoryUsage;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -185,7 +186,7 @@ public class TestCacheConfig {
|
|||
Cacheable c = new DataCacheEntry();
|
||||
// Do asserts on block counting.
|
||||
long initialBlockCount = bc.getBlockCount();
|
||||
bc.cacheBlock(bck, c, cc.isInMemory());
|
||||
bc.cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1());
|
||||
assertEquals(doubling? 2: 1, bc.getBlockCount() - initialBlockCount);
|
||||
bc.evictBlock(bck);
|
||||
assertEquals(initialBlockCount, bc.getBlockCount());
|
||||
|
@ -193,7 +194,7 @@ public class TestCacheConfig {
|
|||
// buffers do lazy allocation so sizes are off on first go around.
|
||||
if (sizing) {
|
||||
long originalSize = bc.getCurrentSize();
|
||||
bc.cacheBlock(bck, c, cc.isInMemory());
|
||||
bc.cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1());
|
||||
assertTrue(bc.getCurrentSize() > originalSize);
|
||||
bc.evictBlock(bck);
|
||||
long size = bc.getCurrentSize();
|
||||
|
@ -201,6 +202,19 @@ public class TestCacheConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @param cc
|
||||
* @param filename
|
||||
* @return
|
||||
*/
|
||||
private long cacheDataBlock(final CacheConfig cc, final String filename) {
|
||||
BlockCacheKey bck = new BlockCacheKey(filename, 0);
|
||||
Cacheable c = new DataCacheEntry();
|
||||
// Do asserts on block counting.
|
||||
cc.getBlockCache().cacheBlock(bck, c, cc.isInMemory(), cc.isCacheDataInL1());
|
||||
return cc.getBlockCache().getBlockCount();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDisableCacheDataBlock() throws IOException {
|
||||
Configuration conf = HBaseConfiguration.create();
|
||||
|
@ -310,7 +324,7 @@ public class TestCacheConfig {
|
|||
BlockCache [] bcs = cbc.getBlockCaches();
|
||||
assertTrue(bcs[0] instanceof LruBlockCache);
|
||||
LruBlockCache lbc = (LruBlockCache)bcs[0];
|
||||
assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize());
|
||||
assertEquals(MemorySizeUtil.getLruCacheSize(this.conf), lbc.getMaxSize());
|
||||
assertTrue(bcs[1] instanceof BucketCache);
|
||||
BucketCache bc = (BucketCache)bcs[1];
|
||||
// getMaxSize comes back in bytes but we specified size in MB
|
||||
|
@ -328,19 +342,19 @@ public class TestCacheConfig {
|
|||
// from L1 happens, it does not fail because L2 can't take the eviction because block too big.
|
||||
this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
|
||||
MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
|
||||
long lruExpectedSize = MemorySizeUtil.getOnHeapCacheSize(this.conf);
|
||||
long lruExpectedSize = MemorySizeUtil.getLruCacheSize(this.conf);
|
||||
final int bcSize = 100;
|
||||
long bcExpectedSize = 100 * 1024 * 1024; // MB.
|
||||
assertTrue(lruExpectedSize < bcExpectedSize);
|
||||
this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
|
||||
this.conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY, false);
|
||||
CacheConfig cc = new CacheConfig(this.conf);
|
||||
basicBlockCacheOps(cc, false, false);
|
||||
assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
|
||||
assertTrue(cc.getBlockCache() instanceof LruBlockCache);
|
||||
// TODO: Assert sizes allocated are right and proportions.
|
||||
CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache();
|
||||
LruBlockCache lbc = cbc.onHeapCache;
|
||||
LruBlockCache lbc = (LruBlockCache)cc.getBlockCache();
|
||||
assertEquals(lruExpectedSize, lbc.getMaxSize());
|
||||
BlockCache bc = cbc.l2Cache;
|
||||
BlockCache bc = lbc.getVictimHandler();
|
||||
// getMaxSize comes back in bytes but we specified size in MB
|
||||
assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize());
|
||||
// Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2.
|
||||
|
@ -348,7 +362,7 @@ public class TestCacheConfig {
|
|||
long initialL2BlockCount = bc.getBlockCount();
|
||||
Cacheable c = new DataCacheEntry();
|
||||
BlockCacheKey bck = new BlockCacheKey("bck", 0);
|
||||
lbc.cacheBlock(bck, c, false);
|
||||
lbc.cacheBlock(bck, c, false, false);
|
||||
assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
|
||||
assertEquals(initialL2BlockCount, bc.getBlockCount());
|
||||
// Force evictions by putting in a block too big.
|
||||
|
@ -367,6 +381,32 @@ public class TestCacheConfig {
|
|||
// The eviction thread in lrublockcache needs to run.
|
||||
while (initialL1BlockCount != lbc.getBlockCount()) Threads.sleep(10);
|
||||
assertEquals(initialL1BlockCount, lbc.getBlockCount());
|
||||
long count = bc.getBlockCount();
|
||||
assertTrue(initialL2BlockCount + 1 <= count);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the cacheDataInL1 flag. When set, data blocks should be cached in the l1 tier, up in
|
||||
* LruBlockCache when using CombinedBlockCcahe.
|
||||
*/
|
||||
@Test
|
||||
public void testCacheDataInL1() {
|
||||
this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
|
||||
this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100);
|
||||
CacheConfig cc = new CacheConfig(this.conf);
|
||||
assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
|
||||
CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache();
|
||||
// Add a data block. Should go into L2, into the Bucket Cache, not the LruBlockCache.
|
||||
cacheDataBlock(cc, "1");
|
||||
LruBlockCache lrubc = (LruBlockCache)cbc.getBlockCaches()[0];
|
||||
assertDataBlockCount(lrubc, 0);
|
||||
// Enable our test flag.
|
||||
cc.setCacheDataInL1(true);
|
||||
cacheDataBlock(cc, "2");
|
||||
assertDataBlockCount(lrubc, 1);
|
||||
cc.setCacheDataInL1(false);
|
||||
cacheDataBlock(cc, "3");
|
||||
assertDataBlockCount(lrubc, 1);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -376,9 +416,16 @@ public class TestCacheConfig {
|
|||
c.set(CacheConfig.BUCKET_CACHE_BUCKETS_KEY, "256,512,1024,2048,4000,4096");
|
||||
c.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024);
|
||||
try {
|
||||
CacheConfig.getBucketCache(c);
|
||||
CacheConfig.getL2(c);
|
||||
fail("Should throw IllegalArgumentException when passing illegal value for bucket size");
|
||||
} catch (IllegalArgumentException e) {
|
||||
}
|
||||
}
|
||||
|
||||
private void assertDataBlockCount(final LruBlockCache bc, final int expected) {
|
||||
Map<BlockType, Integer> blocks = bc.getBlockTypeCountsForTest();
|
||||
assertEquals(expected, blocks == null? 0:
|
||||
blocks.get(BlockType.DATA) == null? 0:
|
||||
blocks.get(BlockType.DATA).intValue());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -230,7 +230,7 @@ public class TestCacheOnWrite {
|
|||
new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
|
||||
cowType.shouldBeCached(BlockType.LEAF_INDEX),
|
||||
cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
|
||||
false, false);
|
||||
false, false, false);
|
||||
}
|
||||
|
||||
@After
|
||||
|
|
|
@ -80,7 +80,7 @@ public class TestLruBlockCache {
|
|||
for (int blockIndex = 0; blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) {
|
||||
CachedItem block = new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement());
|
||||
boolean inMemory = Math.random() > 0.5;
|
||||
cache.cacheBlock(block.cacheKey, block, inMemory);
|
||||
cache.cacheBlock(block.cacheKey, block, inMemory, false);
|
||||
}
|
||||
cache.evictBlocksByHfileName(hfileName);
|
||||
}
|
||||
|
@ -350,7 +350,7 @@ public class TestLruBlockCache {
|
|||
cache.getBlock(multiBlocks[i].cacheKey, true, false, true);
|
||||
|
||||
// Add memory blocks as such
|
||||
cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
|
||||
cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true, false);
|
||||
expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize();
|
||||
|
||||
}
|
||||
|
@ -385,7 +385,7 @@ public class TestLruBlockCache {
|
|||
assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true));
|
||||
|
||||
// Insert another memory block
|
||||
cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
|
||||
cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true, false);
|
||||
|
||||
// Three evictions, three evicted.
|
||||
assertEquals(3, cache.getStats().getEvictionCount());
|
||||
|
@ -423,7 +423,7 @@ public class TestLruBlockCache {
|
|||
assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true));
|
||||
|
||||
// Cache a big memory block
|
||||
cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true);
|
||||
cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true, false);
|
||||
|
||||
// Six evictions, twelve evicted (3 new)
|
||||
assertEquals(6, cache.getStats().getEvictionCount());
|
||||
|
@ -478,7 +478,7 @@ public class TestLruBlockCache {
|
|||
assertEquals(expectedCacheSize, cache.heapSize());
|
||||
|
||||
// 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1
|
||||
cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true);
|
||||
cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true, false);
|
||||
// Single eviction, one block evicted
|
||||
assertEquals(1, cache.getStats().getEvictionCount());
|
||||
assertEquals(1, cache.getStats().getEvictedCount());
|
||||
|
@ -486,7 +486,7 @@ public class TestLruBlockCache {
|
|||
assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true));
|
||||
|
||||
// 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2
|
||||
cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true);
|
||||
cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true, false);
|
||||
// Two evictions, two evicted.
|
||||
assertEquals(2, cache.getStats().getEvictionCount());
|
||||
assertEquals(2, cache.getStats().getEvictedCount());
|
||||
|
@ -494,10 +494,10 @@ public class TestLruBlockCache {
|
|||
assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true));
|
||||
|
||||
// 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6
|
||||
cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true);
|
||||
cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
|
||||
cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true);
|
||||
cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true);
|
||||
cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true, false);
|
||||
cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true, false);
|
||||
cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true, false);
|
||||
cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true, false);
|
||||
// Three evictions, three evicted.
|
||||
assertEquals(6, cache.getStats().getEvictionCount());
|
||||
assertEquals(6, cache.getStats().getEvictedCount());
|
||||
|
@ -509,9 +509,9 @@ public class TestLruBlockCache {
|
|||
|
||||
// 4. Insert 3 memory blocks, the remaining 1 single and 2 multi evicted
|
||||
// si:mu:me = 0:0:9
|
||||
cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true);
|
||||
cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true);
|
||||
cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true);
|
||||
cache.cacheBlock(memoryBlocks[6].cacheKey, memoryBlocks[6], true, false);
|
||||
cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true, false);
|
||||
cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true, false);
|
||||
// Three evictions, three evicted.
|
||||
assertEquals(9, cache.getStats().getEvictionCount());
|
||||
assertEquals(9, cache.getStats().getEvictedCount());
|
||||
|
@ -522,7 +522,7 @@ public class TestLruBlockCache {
|
|||
|
||||
// 5. Insert one memory block, the oldest memory evicted
|
||||
// si:mu:me = 0:0:9
|
||||
cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true);
|
||||
cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true, false);
|
||||
// one eviction, one evicted.
|
||||
assertEquals(10, cache.getStats().getEvictionCount());
|
||||
assertEquals(10, cache.getStats().getEvictedCount());
|
||||
|
@ -679,7 +679,7 @@ public class TestLruBlockCache {
|
|||
cache.getBlock(multiBlocks[i].cacheKey, true, false, true);
|
||||
|
||||
// Add memory blocks as such
|
||||
cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
|
||||
cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true, false);
|
||||
}
|
||||
|
||||
// Do not expect any evictions yet
|
||||
|
|
|
@ -104,11 +104,12 @@ public class TestBucketCache {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
|
||||
boolean cacheDataInL1) {
|
||||
if (super.getBlock(cacheKey, true, false, true) != null) {
|
||||
throw new RuntimeException("Cached an already cached block");
|
||||
}
|
||||
super.cacheBlock(cacheKey, buf, inMemory);
|
||||
super.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -645,7 +645,8 @@ public class TestHeapMemoryManager {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
|
||||
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
|
||||
boolean cacheDataInL1) {
|
||||
|
||||
}
|
||||
|
||||
|
|