HBASE-5001 Improve the performance of block cache keys (Lars H and J-D)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1213995 13f79535-47bb-0310-9956-ffa450edef68
larsh 2011-12-14 00:05:04 +00:00
parent 69b7530f60
commit cd0c4cffa9
17 changed files with 272 additions and 217 deletions

BlockCache.java

@ -27,50 +27,44 @@ import org.apache.hadoop.conf.Configuration;
/**
* Block cache interface. Anything that implements the {@link Cacheable}
* interface can be put in the cache.
*
* TODO: Add filename or hash of filename to block cache key.
*/
public interface BlockCache {
/**
* Add block to cache.
* @param blockName Zero-based file block number.
* @param cacheKey The block's cache key.
* @param buf The block contents wrapped in a ByteBuffer.
* @param inMemory Whether block should be treated as in-memory
*/
public void cacheBlock(String blockName, Cacheable buf, boolean inMemory);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory);
/**
* Add block to cache (defaults to not in-memory).
* @param blockName Zero-based file block number.
* @param cacheKey The block's cache key.
* @param buf The object to cache.
*/
public void cacheBlock(String blockName, Cacheable buf);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf);
/**
* Fetch block from cache.
* @param blockName Block number to fetch.
* @param cacheKey Block to fetch.
* @param caching Whether this request has caching enabled (used for stats)
* @return Block or null if block is not in the cache.
*/
public Cacheable getBlock(String blockName, boolean caching);
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching);
/**
* Evict block from cache.
* @param blockName Block name to evict
* @param cacheKey Block to evict
* @return true if block existed and was evicted, false if not
*/
public boolean evictBlock(String blockName);
public boolean evictBlock(BlockCacheKey cacheKey);
/**
* Evicts all blocks with name starting with the given prefix. This is
* necessary in cases we need to evict all blocks that belong to a particular
* HFile. In HFile v2 all blocks consist of the storefile name (UUID), an
* underscore, and the block offset in the file. An efficient implementation
* would avoid scanning all blocks in the cache.
* Evicts all blocks for the given HFile.
*
* @return the number of blocks evicted
*/
public int evictBlocksByPrefix(String string);
public int evictBlocksByHfileName(String hfileName);
/**
* Get the statistics for this block cache.

BlockCacheKey.java

@ -0,0 +1,76 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.io.hfile;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Cache Key for use with implementations of {@link BlockCache}
*/
public class BlockCacheKey implements HeapSize {
private String hfileName;
private long offset;
/**
* Construct a new BlockCacheKey
* @param file The name of the HFile this block belongs to.
* @param offset Offset of the block into the file
*/
public BlockCacheKey(String file, long offset) {
this.hfileName = file;
this.offset = offset;
}
@Override
public int hashCode() {
return hfileName.hashCode() * 127 + (int) (offset ^ (offset >>> 32));
}
@Override
public boolean equals(Object o) {
if (o instanceof BlockCacheKey) {
BlockCacheKey k = (BlockCacheKey) o;
return offset == k.offset
&& (hfileName == null ? k.hfileName == null : hfileName
.equals(k.hfileName));
} else {
return false;
}
}
@Override
public String toString() {
return hfileName + "_" + offset;
}
// Strings have two bytes per character due to default
// Java unicode encoding (hence the times 2).
@Override
public long heapSize() {
return 2 * hfileName.length() + Bytes.SIZEOF_LONG;
}
// can't avoid this unfortunately
/**
* @return The hfileName portion of this cache key
*/
public String getHfileName() {
return hfileName;
}
}
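The structured key keeps the identity that the old "&lt;hfileName&gt;_&lt;offset&gt;" strings encoded, and the equals/hashCode pair above makes it safe to use directly as a hash map key. A small hypothetical demonstration (file name and offset are made up):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;

public class BlockCacheKeyDemo {
  public static void main(String[] args) {
    BlockCacheKey a = new BlockCacheKey("f1a2b3", 4096L);
    BlockCacheKey b = new BlockCacheKey("f1a2b3", 4096L);
    // Distinct instances compare equal and hash alike, as a map key must.
    System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true
    // toString() still renders the old underscore-separated string form.
    System.out.println(a); // f1a2b3_4096

    Map<BlockCacheKey, String> cache = new HashMap<BlockCacheKey, String>();
    cache.put(a, "block contents");
    System.out.println(cache.get(b)); // block contents
  }
}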

CachedBlock.java

@ -52,28 +52,27 @@ public class CachedBlock implements HeapSize, Comparable<CachedBlock> {
MEMORY
};
private final String blockName;
private final BlockCacheKey cacheKey;
private final Cacheable buf;
private volatile long accessTime;
private long size;
private BlockPriority priority;
public CachedBlock(String blockName, Cacheable buf, long accessTime) {
this(blockName, buf, accessTime, false);
public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime) {
this(cacheKey, buf, accessTime, false);
}
public CachedBlock(String blockName, Cacheable buf, long accessTime,
public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime,
boolean inMemory) {
this.blockName = blockName;
this.cacheKey = cacheKey;
this.buf = buf;
this.accessTime = accessTime;
// We approximate the size of this class by the size of its name string
// plus the size of its byte buffer plus the overhead associated with all
// the base classes. Strings have two bytes per character due to default
// Java unicode encoding (hence the times 2). We also include the base class
// the base classes. We also include the base class
// sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with
// their buffer lengths. This variable is used elsewhere in unit tests.
this.size = ClassSize.align(2 * blockName.length())
this.size = ClassSize.align(cacheKey.heapSize())
+ ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD;
if(inMemory) {
this.priority = BlockPriority.MEMORY;
@ -105,11 +104,11 @@ public class CachedBlock implements HeapSize, Comparable<CachedBlock> {
return this.buf;
}
public String getName() {
return this.blockName;
public BlockCacheKey getCacheKey() {
return this.cacheKey;
}
public BlockPriority getPriority() {
return this.priority;
}
}
}

DoubleBlockCache.java

@ -78,28 +78,28 @@ public class DoubleBlockCache implements BlockCache, HeapSize {
}
@Override
public void cacheBlock(String blockName, Cacheable buf, boolean inMemory) {
onHeapCache.cacheBlock(blockName, buf, inMemory);
offHeapCache.cacheBlock(blockName, buf);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
onHeapCache.cacheBlock(cacheKey, buf, inMemory);
offHeapCache.cacheBlock(cacheKey, buf);
}
@Override
public void cacheBlock(String blockName, Cacheable buf) {
onHeapCache.cacheBlock(blockName, buf);
offHeapCache.cacheBlock(blockName, buf);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
onHeapCache.cacheBlock(cacheKey, buf);
offHeapCache.cacheBlock(cacheKey, buf);
}
@Override
public Cacheable getBlock(String blockName, boolean caching) {
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching) {
Cacheable cachedBlock;
if ((cachedBlock = onHeapCache.getBlock(blockName, caching)) != null) {
if ((cachedBlock = onHeapCache.getBlock(cacheKey, caching)) != null) {
stats.hit(caching);
return cachedBlock;
} else if ((cachedBlock = offHeapCache.getBlock(blockName, caching)) != null) {
} else if ((cachedBlock = offHeapCache.getBlock(cacheKey, caching)) != null) {
if (caching) {
onHeapCache.cacheBlock(blockName, cachedBlock);
onHeapCache.cacheBlock(cacheKey, cachedBlock);
}
stats.hit(caching);
return cachedBlock;
@ -110,10 +110,10 @@ public class DoubleBlockCache implements BlockCache, HeapSize {
}
@Override
public boolean evictBlock(String blockName) {
public boolean evictBlock(BlockCacheKey cacheKey) {
stats.evict();
boolean cacheA = onHeapCache.evictBlock(blockName);
boolean cacheB = offHeapCache.evictBlock(blockName);
boolean cacheA = onHeapCache.evictBlock(cacheKey);
boolean cacheB = offHeapCache.evictBlock(cacheKey);
boolean evicted = cacheA || cacheB;
if (evicted) {
stats.evicted();
@ -154,9 +154,9 @@ public class DoubleBlockCache implements BlockCache, HeapSize {
}
@Override
public int evictBlocksByPrefix(String prefix) {
onHeapCache.evictBlocksByPrefix(prefix);
offHeapCache.evictBlocksByPrefix(prefix);
public int evictBlocksByHfileName(String hfileName) {
onHeapCache.evictBlocksByHfileName(hfileName);
offHeapCache.evictBlocksByHfileName(hfileName);
return 0;
}

HFile.java

@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
@ -143,9 +142,6 @@ public class HFile {
public final static String DEFAULT_COMPRESSION =
DEFAULT_COMPRESSION_ALGORITHM.getName();
/** Separator between HFile name and offset in block cache key */
static final char CACHE_KEY_SEPARATOR = '_';
/**
* We assume that HFile path ends with
* ROOT_DIR/TABLE_NAME/REGION_NAME/CF_NAME/HFILE, so it has at least this
@ -505,8 +501,8 @@ public class HFile {
System.exit(prettyPrinter.run(args));
}
public static String getBlockCacheKey(String hfileName, long offset) {
return hfileName + CACHE_KEY_SEPARATOR + offset;
public static BlockCacheKey getBlockCacheKey(String hfileName, long offset) {
return new BlockCacheKey(hfileName, offset);
}
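The performance motivation is easiest to see here: the old helper built (and hashed) a fresh String on every block access, whereas the new key hashes a long and reuses the file name's cached String hash. A hypothetical side-by-side, with illustrative method names:

import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;

public class KeyCostSketch {
  // Old scheme: allocates and fully re-hashes a brand-new String per access.
  static int oldStyleHash(String hfileName, long offset) {
    return (hfileName + '_' + offset).hashCode();
  }

  // New scheme: one small fixed-size object; hashCode() folds the offset
  // into the String's hash with no concatenation or character copying.
  static int newStyleHash(String hfileName, long offset) {
    return new BlockCacheKey(hfileName, offset).hashCode();
  }
}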
/**

HFileReaderV1.java

@ -211,7 +211,7 @@ public class HFileReaderV1 extends AbstractHFileReader {
long startTimeNs = System.nanoTime();
String cacheKey = HFile.getBlockCacheKey(name, offset);
BlockCacheKey cacheKey = HFile.getBlockCacheKey(name, offset);
// Per meta key from any given file, synchronize reads for said block
synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
@ -271,7 +271,7 @@ public class HFileReaderV1 extends AbstractHFileReader {
}
long offset = dataBlockIndexReader.getRootBlockOffset(block);
String cacheKey = HFile.getBlockCacheKey(name, offset);
BlockCacheKey cacheKey = HFile.getBlockCacheKey(name, offset);
// For any given block from any given file, synchronize reads for said
// block.

HFileReaderV2.java

@ -19,9 +19,7 @@
*/
package org.apache.hadoop.hbase.io.hfile;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@ -185,7 +183,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
// Check cache for block. If found return.
long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
String cacheKey = HFile.getBlockCacheKey(name, metaBlockOffset);
BlockCacheKey cacheKey = HFile.getBlockCacheKey(name, metaBlockOffset);
cacheBlock &= cacheConf.shouldCacheDataOnRead();
if (cacheConf.isBlockCacheEnabled()) {
@ -251,7 +249,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
// the other choice is to duplicate work (which the cache would prevent you
// from doing).
String cacheKey = HFile.getBlockCacheKey(name, dataBlockOffset);
BlockCacheKey cacheKey = HFile.getBlockCacheKey(name, dataBlockOffset);
IdLock.Entry lockEntry = offsetLock.getLockEntry(dataBlockOffset);
try {
blockLoads.incrementAndGet();
@ -334,8 +332,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
public void close(boolean evictOnClose) throws IOException {
if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
int numEvicted = cacheConf.getBlockCache().evictBlocksByPrefix(name
+ HFile.CACHE_KEY_SEPARATOR);
int numEvicted = cacheConf.getBlockCache().evictBlocksByHfileName(name);
if (LOG.isTraceEnabled()) {
LOG.trace("On close, file=" + name + " evicted=" + numEvicted
+ " block(s)");

LruBlockCache.java

@ -110,7 +110,7 @@ public class LruBlockCache implements BlockCache, HeapSize {
static final int statThreadPeriod = 60 * 5;
/** Concurrent map (the cache) */
private final ConcurrentHashMap<String,CachedBlock> map;
private final ConcurrentHashMap<BlockCacheKey,CachedBlock> map;
/** Eviction lock (locked when eviction in process) */
private final ReentrantLock evictionLock = new ReentrantLock(true);
@ -220,7 +220,7 @@ public class LruBlockCache implements BlockCache, HeapSize {
}
this.maxSize = maxSize;
this.blockSize = blockSize;
map = new ConcurrentHashMap<String,CachedBlock>(mapInitialSize,
map = new ConcurrentHashMap<BlockCacheKey,CachedBlock>(mapInitialSize,
mapLoadFactor, mapConcurrencyLevel);
this.minFactor = minFactor;
this.acceptableFactor = acceptableFactor;
@ -256,18 +256,18 @@ public class LruBlockCache implements BlockCache, HeapSize {
* <p>
* It is assumed this will NEVER be called on an already cached block. If
* that is done, an exception will be thrown.
* @param blockName block name
* @param cacheKey block's cache key
* @param buf block buffer
* @param inMemory if block is in-memory
*/
public void cacheBlock(String blockName, Cacheable buf, boolean inMemory) {
CachedBlock cb = map.get(blockName);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
CachedBlock cb = map.get(cacheKey);
if(cb != null) {
throw new RuntimeException("Cached an already cached block");
}
cb = new CachedBlock(blockName, buf, count.incrementAndGet(), inMemory);
cb = new CachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
long newSize = updateSizeMetrics(cb, false);
map.put(blockName, cb);
map.put(cacheKey, cb);
elements.incrementAndGet();
if(newSize > acceptableSize() && !evictionInProgress) {
runEviction();
@ -281,11 +281,11 @@ public class LruBlockCache implements BlockCache, HeapSize {
* that is done, it is assumed that you are reinserting the same exact
* block due to a race condition and will update the buffer but not modify
* the size of the cache.
* @param blockName block name
* @param cacheKey block's cache key
* @param buf block buffer
*/
public void cacheBlock(String blockName, Cacheable buf) {
cacheBlock(blockName, buf, false);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
cacheBlock(cacheKey, buf, false);
}
/**
@ -312,13 +312,13 @@ public class LruBlockCache implements BlockCache, HeapSize {
/**
* Get the buffer of the block with the specified name.
* @param blockName block name
* @param cacheKey block's cache key
* @param caching true if the caller caches blocks on cache misses
* @return buffer of specified block name, or null if not in cache
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(String blockName, boolean caching) {
CachedBlock cb = map.get(blockName);
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching) {
CachedBlock cb = map.get(cacheKey);
if(cb == null) {
stats.miss(caching);
return null;
@ -330,31 +330,28 @@ public class LruBlockCache implements BlockCache, HeapSize {
@Override
public boolean evictBlock(String blockName) {
CachedBlock cb = map.get(blockName);
public boolean evictBlock(BlockCacheKey cacheKey) {
CachedBlock cb = map.get(cacheKey);
if (cb == null) return false;
evictBlock(cb);
return true;
}
/**
* Evicts all blocks whose name starts with the given prefix. This is an
* Evicts all blocks for a specific HFile. This is an
* expensive operation implemented as a linear-time search through all blocks
* in the cache. Ideally this should be a search in a log-access-time map.
*
* <p>
* This is used for evict-on-close to remove all blocks of a specific HFile.
* The prefix would be the HFile/StoreFile name (a UUID) followed by an
* underscore, because HFile v2 block names in cache are of the form
* "&lt;storeFileUUID&gt;_&lt;blockOffset&gt;".
*
* @return the number of blocks evicted
*/
@Override
public int evictBlocksByPrefix(String prefix) {
public int evictBlocksByHfileName(String hfileName) {
int numEvicted = 0;
for (String key : map.keySet()) {
if (key.startsWith(prefix)) {
for (BlockCacheKey key : map.keySet()) {
if (key.getHfileName().equals(hfileName)) {
if (evictBlock(key))
++numEvicted;
}
@ -363,7 +360,7 @@ public class LruBlockCache implements BlockCache, HeapSize {
}
protected long evictBlock(CachedBlock block) {
map.remove(block.getName());
map.remove(block.getCacheKey());
updateSizeMetrics(block, true);
elements.decrementAndGet();
stats.evicted();
@ -683,26 +680,19 @@ public class LruBlockCache implements BlockCache, HeapSize {
Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
final String pattern = "\\" + HFile.CACHE_KEY_SEPARATOR;
for (CachedBlock cb : map.values()) {
// split name and get the first part (e.g., "8351478435190657655_0")
// see HFile.getBlockCacheKey for structure of block cache key.
String s[] = cb.getName().split(pattern);
if (s.length > 0) {
String sf = s[0];
Path path = sfMap.get(sf);
if ( path != null) {
BlockCacheColumnFamilySummary lookup =
BlockCacheColumnFamilySummary.createFromStoreFilePath(path);
BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
if (bcse == null) {
bcse = BlockCacheColumnFamilySummary.create(lookup);
bcs.put(lookup,bcse);
}
bcse.incrementBlocks();
bcse.incrementHeapSize(cb.heapSize());
String sf = cb.getCacheKey().getHfileName();
Path path = sfMap.get(sf);
if ( path != null) {
BlockCacheColumnFamilySummary lookup =
BlockCacheColumnFamilySummary.createFromStoreFilePath(path);
BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
if (bcse == null) {
bcse = BlockCacheColumnFamilySummary.create(lookup);
bcs.put(lookup,bcse);
}
bcse.incrementBlocks();
bcse.incrementHeapSize(cb.heapSize());
}
}
List<BlockCacheColumnFamilySummary> list =

SimpleBlockCache.java

@ -33,14 +33,14 @@ import org.apache.hadoop.conf.Configuration;
*/
public class SimpleBlockCache implements BlockCache {
private static class Ref extends SoftReference<Cacheable> {
public String blockId;
public Ref(String blockId, Cacheable block, ReferenceQueue q) {
public BlockCacheKey blockId;
public Ref(BlockCacheKey blockId, Cacheable block, ReferenceQueue q) {
super(block, q);
this.blockId = blockId;
}
}
private Map<String,Ref> cache =
new HashMap<String,Ref>();
private Map<BlockCacheKey,Ref> cache =
new HashMap<BlockCacheKey,Ref>();
private ReferenceQueue q = new ReferenceQueue();
public int dumps = 0;
@ -68,26 +68,26 @@ public class SimpleBlockCache implements BlockCache {
return cache.size();
}
public synchronized Cacheable getBlock(String blockName, boolean caching) {
public synchronized Cacheable getBlock(BlockCacheKey cacheKey, boolean caching) {
processQueue(); // clear out some crap.
Ref ref = cache.get(blockName);
Ref ref = cache.get(cacheKey);
if (ref == null)
return null;
return ref.get();
}
public synchronized void cacheBlock(String blockName, Cacheable block) {
cache.put(blockName, new Ref(blockName, block, q));
public synchronized void cacheBlock(BlockCacheKey cacheKey, Cacheable block) {
cache.put(cacheKey, new Ref(cacheKey, block, q));
}
public synchronized void cacheBlock(String blockName, Cacheable block,
public synchronized void cacheBlock(BlockCacheKey cacheKey, Cacheable block,
boolean inMemory) {
cache.put(blockName, new Ref(blockName, block, q));
cache.put(cacheKey, new Ref(cacheKey, block, q));
}
@Override
public boolean evictBlock(String blockName) {
return cache.remove(blockName) != null;
public boolean evictBlock(BlockCacheKey cacheKey) {
return cache.remove(cacheKey) != null;
}
public void shutdown() {
@ -119,7 +119,7 @@ public class SimpleBlockCache implements BlockCache {
}
@Override
public int evictBlocksByPrefix(String string) {
public int evictBlocksByHfileName(String string) {
throw new UnsupportedOperationException();
}

SingleSizeCache.java

@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
@ -53,7 +54,7 @@ import com.google.common.collect.MapMaker;
**/
public class SingleSizeCache implements BlockCache, HeapSize {
private final Slab backingStore;
private final ConcurrentMap<String, CacheablePair> backingMap;
private final ConcurrentMap<BlockCacheKey, CacheablePair> backingMap;
private final int numBlocks;
private final int blockSize;
private final CacheStats stats;
@ -90,9 +91,9 @@ public class SingleSizeCache implements BlockCache, HeapSize {
// This evictionListener is called whenever the cache automatically
// evicts
// something.
MapEvictionListener<String, CacheablePair> listener = new MapEvictionListener<String, CacheablePair>() {
MapEvictionListener<BlockCacheKey, CacheablePair> listener = new MapEvictionListener<BlockCacheKey, CacheablePair>() {
@Override
public void onEviction(String key, CacheablePair value) {
public void onEviction(BlockCacheKey key, CacheablePair value) {
timeSinceLastAccess.set(System.nanoTime()
- value.recentlyAccessed.get());
stats.evict();
@ -106,7 +107,7 @@ public class SingleSizeCache implements BlockCache, HeapSize {
}
@Override
public void cacheBlock(String blockName, Cacheable toBeCached) {
public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
ByteBuffer storedBlock;
try {
@ -138,7 +139,7 @@ public class SingleSizeCache implements BlockCache, HeapSize {
}
@Override
public Cacheable getBlock(String key, boolean caching) {
public Cacheable getBlock(BlockCacheKey key, boolean caching) {
CacheablePair contentBlock = backingMap.get(key);
if (contentBlock == null) {
stats.miss(caching);
@ -170,7 +171,7 @@ public class SingleSizeCache implements BlockCache, HeapSize {
* @param key the key of the entry we are going to evict
* @return the evicted ByteBuffer
*/
public boolean evictBlock(String key) {
public boolean evictBlock(BlockCacheKey key) {
stats.evict();
CacheablePair evictedBlock = backingMap.remove(key);
@ -181,7 +182,7 @@ public class SingleSizeCache implements BlockCache, HeapSize {
}
private void doEviction(String key, CacheablePair evictedBlock) {
private void doEviction(BlockCacheKey key, CacheablePair evictedBlock) {
long evictedHeap = 0;
synchronized (evictedBlock) {
if (evictedBlock.serializedData == null) {
@ -282,8 +283,8 @@ public class SingleSizeCache implements BlockCache, HeapSize {
/* Since it's off-heap, it doesn't matter if it's in memory or not */
@Override
public void cacheBlock(String blockName, Cacheable buf, boolean inMemory) {
this.cacheBlock(blockName, buf);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
this.cacheBlock(cacheKey, buf);
}
/*
@ -291,10 +292,10 @@ public class SingleSizeCache implements BlockCache, HeapSize {
* implemented in the event we want to use this as a standalone cache.
*/
@Override
public int evictBlocksByPrefix(String prefix) {
public int evictBlocksByHfileName(String hfileName) {
int evictedCount = 0;
for (String e : backingMap.keySet()) {
if (e.startsWith(prefix)) {
for (BlockCacheKey e : backingMap.keySet()) {
if (e.getHfileName().equals(hfileName)) {
this.evictBlock(e);
}
}

SlabCache.java

@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.util.ClassSize;
@ -53,7 +54,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
**/
public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
private final ConcurrentHashMap<String, SingleSizeCache> backingStore;
private final ConcurrentHashMap<BlockCacheKey, SingleSizeCache> backingStore;
private final TreeMap<Integer, SingleSizeCache> sizer;
static final Log LOG = LogFactory.getLog(SlabCache.class);
static final int STAT_THREAD_PERIOD_SECS = 60 * 5;
@ -85,7 +86,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
this.requestStats = new SlabStats();
this.successfullyCachedStats = new SlabStats();
backingStore = new ConcurrentHashMap<String, SingleSizeCache>();
backingStore = new ConcurrentHashMap<BlockCacheKey, SingleSizeCache>();
sizer = new TreeMap<Integer, SingleSizeCache>();
this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
STAT_THREAD_PERIOD_SECS, STAT_THREAD_PERIOD_SECS, TimeUnit.SECONDS);
@ -184,7 +185,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
}
/**
* Cache the block with the specified name and buffer. First finds what size
* Cache the block with the specified key and buffer. First finds what size
* SingleSlabCache it should fit in. If the block doesn't fit in any, it will
* return without doing anything.
* <p>
@ -192,10 +193,10 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
* is done, it is assumed that you are reinserting the same exact block due to
* a race condition, and will throw a runtime exception.
*
* @param blockName block name
* @param cacheKey block cache key
* @param cachedItem block buffer
*/
public void cacheBlock(String blockName, Cacheable cachedItem) {
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem) {
Entry<Integer, SingleSizeCache> scacheEntry = getHigherBlock(cachedItem
.getSerializedLength());
@ -212,15 +213,15 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
* This will throw a runtime exception if we try to cache the same value
* twice
*/
scache.cacheBlock(blockName, cachedItem);
scache.cacheBlock(cacheKey, cachedItem);
}
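The "finds what size SingleSlabCache it should fit in" step above is a ceiling lookup in the sizer TreeMap. getHigherBlock's body is not shown in this diff, so the following is an assumption about that mechanism, with made-up slab sizes:

import java.util.Map;
import java.util.TreeMap;

public class SlabFitSketch {
  public static void main(String[] args) {
    // Slab block size -> slab label, mirroring SlabCache's sizer map.
    TreeMap<Integer, String> sizer = new TreeMap<Integer, String>();
    sizer.put(4 * 1024, "4K slab");
    sizer.put(64 * 1024, "64K slab");
    // ceilingEntry returns the smallest slab >= the block's serialized
    // length, or null when nothing fits (cacheBlock then does nothing).
    Map.Entry<Integer, String> fit = sizer.ceilingEntry(10000);
    System.out.println(fit == null ? "no fit" : fit.getValue()); // 64K slab
  }
}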
/**
* We don't care about whether it's in memory or not, so we just pass the call
* through.
*/
public void cacheBlock(String blockName, Cacheable buf, boolean inMemory) {
cacheBlock(blockName, buf);
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
cacheBlock(cacheKey, buf);
}
public CacheStats getStats() {
@ -234,7 +235,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
* @param caching
* @return buffer of specified block name, or null if not in cache
*/
public Cacheable getBlock(String key, boolean caching) {
public Cacheable getBlock(BlockCacheKey key, boolean caching) {
SingleSizeCache cachedBlock = backingStore.get(key);
if (cachedBlock == null) {
stats.miss(caching);
@ -255,24 +256,24 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
* Evicts a block from the cache. This is public, and thus contributes to the
* evict counter.
*/
public boolean evictBlock(String key) {
SingleSizeCache cacheEntry = backingStore.get(key);
public boolean evictBlock(BlockCacheKey cacheKey) {
SingleSizeCache cacheEntry = backingStore.get(cacheKey);
if (cacheEntry == null) {
return false;
} else {
cacheEntry.evictBlock(key);
cacheEntry.evictBlock(cacheKey);
return true;
}
}
@Override
public void onEviction(String key, SingleSizeCache notifier) {
public void onEviction(BlockCacheKey key, SingleSizeCache notifier) {
stats.evicted();
backingStore.remove(key);
}
@Override
public void onInsertion(String key, SingleSizeCache notifier) {
public void onInsertion(BlockCacheKey key, SingleSizeCache notifier) {
backingStore.put(key, notifier);
}
@ -402,10 +403,10 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
}
}
public int evictBlocksByPrefix(String prefix) {
public int evictBlocksByHfileName(String hfileName) {
int numEvicted = 0;
for (String key : backingStore.keySet()) {
if (key.startsWith(prefix)) {
for (BlockCacheKey key : backingStore.keySet()) {
if (key.getHfileName().equals(hfileName)) {
if (evictBlock(key))
++numEvicted;
}

SlabItemActionWatcher.java

@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.io.hfile.slab;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
/**
* Interface for objects that want to know when actions occur in a SingleSizeCache.
* */
@ -31,7 +33,7 @@ interface SlabItemActionWatcher {
* @param key the key of the item being evicted
* @param notifier the object notifying the SlabCache of the eviction.
*/
void onEviction(String key, SingleSizeCache notifier);
void onEviction(BlockCacheKey key, SingleSizeCache notifier);
/**
* This is called as a callback when an item is inserted into a SingleSizeCache.
@ -39,5 +41,5 @@ interface SlabItemActionWatcher {
* @param key the key of the item being added
* @param notifier the object notifying the SlabCache of the insertion.
*/
void onInsertion(String key, SingleSizeCache notifier);
void onInsertion(BlockCacheKey key, SingleSizeCache notifier);
}

CacheTestUtils.java

@ -155,7 +155,7 @@ public class CacheTestUtils {
public static void hammerSingleKey(final BlockCache toBeTested,
int BlockSize, int numThreads, int numQueries) throws Exception {
final String key = "key";
final BlockCacheKey key = new BlockCacheKey("key", 0);
final byte[] buf = new byte[5 * 1024];
Arrays.fill(buf, (byte) 5);
@ -206,7 +206,7 @@ public class CacheTestUtils {
@Override
public void doAnAction() throws Exception {
for (int j = 0; j < 100; j++) {
String key = "key_" + finalI + "_" + j;
BlockCacheKey key = new BlockCacheKey("key_" + finalI + "_" + j, 0);
Arrays.fill(buf, (byte) (finalI * j));
final ByteArrayCacheable bac = new ByteArrayCacheable(buf);
@ -325,14 +325,14 @@ public class CacheTestUtils {
;
returnedBlocks[i] = new HFileBlockPair();
returnedBlocks[i].blockName = strKey;
returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0);
returnedBlocks[i].block = generated;
}
return returnedBlocks;
}
private static class HFileBlockPair {
String blockName;
BlockCacheKey blockName;
HFileBlock block;
}
}

TestCacheOnWrite.java

@ -144,7 +144,8 @@ public class TestCacheOnWrite {
@After
public void tearDown() {
blockCache.evictBlocksByPrefix("");
cacheConf = new CacheConfig(conf);
blockCache = cacheConf.getBlockCache();
}
@Test
@ -173,7 +174,7 @@ public class TestCacheOnWrite {
// Flags: don't cache the block, use pread, this is not a compaction.
HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
false);
String blockCacheKey = HFile.getBlockCacheKey(reader.getName(), offset);
BlockCacheKey blockCacheKey = HFile.getBlockCacheKey(reader.getName(), offset);
boolean isCached = blockCache.getBlock(blockCacheKey, true) != null;
boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
assertEquals(testName + " " + block, shouldBeCached, isCached);

TestCachedBlockQueue.java

@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.io.hfile;
import java.nio.ByteBuffer;
import java.util.LinkedList;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import junit.framework.TestCase;
@ -65,7 +64,7 @@ public class TestCachedBlockQueue extends TestCase {
assertEquals(queue.heapSize(), expectedSize);
for (int i = 1; i <= 8; i++) {
assertEquals(queue.pollLast().getName(), "cb"+i);
assertEquals(queue.pollLast().getCacheKey().getHfileName(), "cb"+i);
}
}
@ -110,14 +109,14 @@ public class TestCachedBlockQueue extends TestCase {
assertEquals(queue.heapSize(), expectedSize);
for (int i = 0; i <= 8; i++) {
assertEquals(queue.pollLast().getName(), "cb"+i);
assertEquals(queue.pollLast().getCacheKey().getHfileName(), "cb"+i);
}
}
private static class CachedBlock extends org.apache.hadoop.hbase.io.hfile.CachedBlock
{
public CachedBlock(final long heapSize, String name, long accessTime) {
super(name,
super(new BlockCacheKey(name, 0),
new Cacheable() {
@Override
public long heapSize() {

TestHFileBlockIndex.java

@ -468,8 +468,7 @@ public class TestHFileBlockIndex {
LOG.info("Index block size: " + indexBlockSize + ", compression: "
+ compr);
// Evict all blocks that were cached-on-write by the previous invocation.
blockCache.evictBlocksByPrefix(hfilePath.getName()
+ HFile.CACHE_KEY_SEPARATOR);
blockCache.evictBlocksByHfileName(hfilePath.getName());
conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, indexBlockSize);
Set<String> keyStrSet = new HashSet<String>();

TestLruBlockCache.java

@ -83,7 +83,7 @@ public class TestLruBlockCache {
// Add all the blocks
for (CachedItem block : blocks) {
cache.cacheBlock(block.blockName, block);
cache.cacheBlock(block.cacheKey, block);
}
// Let the eviction run
@ -112,12 +112,12 @@ public class TestLruBlockCache {
// Confirm empty
for (CachedItem block : blocks) {
assertTrue(cache.getBlock(block.blockName, true) == null);
assertTrue(cache.getBlock(block.cacheKey, true) == null);
}
// Add blocks
for (CachedItem block : blocks) {
cache.cacheBlock(block.blockName, block);
cache.cacheBlock(block.cacheKey, block);
expectedCacheSize += block.cacheBlockHeapSize();
}
@ -126,7 +126,7 @@ public class TestLruBlockCache {
// Check if all blocks are properly cached and retrieved
for (CachedItem block : blocks) {
HeapSize buf = cache.getBlock(block.blockName, true);
HeapSize buf = cache.getBlock(block.cacheKey, true);
assertTrue(buf != null);
assertEquals(buf.heapSize(), block.heapSize());
}
@ -134,7 +134,7 @@ public class TestLruBlockCache {
// Re-add same blocks and ensure nothing has changed
for (CachedItem block : blocks) {
try {
cache.cacheBlock(block.blockName, block);
cache.cacheBlock(block.cacheKey, block);
assertTrue("Cache should not allow re-caching a block", false);
} catch(RuntimeException re) {
// expected
@ -146,7 +146,7 @@ public class TestLruBlockCache {
// Check if all blocks are properly cached and retrieved
for (CachedItem block : blocks) {
HeapSize buf = cache.getBlock(block.blockName, true);
HeapSize buf = cache.getBlock(block.cacheKey, true);
assertTrue(buf != null);
assertEquals(buf.heapSize(), block.heapSize());
}
@ -172,7 +172,7 @@ public class TestLruBlockCache {
// Add all the blocks
for (CachedItem block : blocks) {
cache.cacheBlock(block.blockName, block);
cache.cacheBlock(block.cacheKey, block);
expectedCacheSize += block.cacheBlockHeapSize();
}
@ -191,10 +191,10 @@ public class TestLruBlockCache {
(maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
// All blocks except block 0 and 1 should be in the cache
assertTrue(cache.getBlock(blocks[0].blockName, true) == null);
assertTrue(cache.getBlock(blocks[1].blockName, true) == null);
assertTrue(cache.getBlock(blocks[0].cacheKey, true) == null);
assertTrue(cache.getBlock(blocks[1].cacheKey, true) == null);
for(int i=2;i<blocks.length;i++) {
assertEquals(cache.getBlock(blocks[i].blockName, true),
assertEquals(cache.getBlock(blocks[i].cacheKey, true),
blocks[i]);
}
}
@ -214,14 +214,14 @@ public class TestLruBlockCache {
// Add and get the multi blocks
for (CachedItem block : multiBlocks) {
cache.cacheBlock(block.blockName, block);
cache.cacheBlock(block.cacheKey, block);
expectedCacheSize += block.cacheBlockHeapSize();
assertEquals(cache.getBlock(block.blockName, true), block);
assertEquals(cache.getBlock(block.cacheKey, true), block);
}
// Add the single blocks (no get)
for (CachedItem block : singleBlocks) {
cache.cacheBlock(block.blockName, block);
cache.cacheBlock(block.cacheKey, block);
expectedCacheSize += block.heapSize();
}
@ -246,14 +246,14 @@ public class TestLruBlockCache {
// This test makes multi go barely over its limit, in-memory
// empty, and the rest in single. Two single evictions and
// one multi eviction expected.
assertTrue(cache.getBlock(singleBlocks[0].blockName, true) == null);
assertTrue(cache.getBlock(multiBlocks[0].blockName, true) == null);
assertTrue(cache.getBlock(singleBlocks[0].cacheKey, true) == null);
assertTrue(cache.getBlock(multiBlocks[0].cacheKey, true) == null);
// And all others to be cached
for(int i=1;i<4;i++) {
assertEquals(cache.getBlock(singleBlocks[i].blockName, true),
assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true),
singleBlocks[i]);
assertEquals(cache.getBlock(multiBlocks[i].blockName, true),
assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true),
multiBlocks[i]);
}
}
@ -285,16 +285,16 @@ public class TestLruBlockCache {
for(int i=0;i<3;i++) {
// Just add single blocks
cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i]);
cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
expectedCacheSize += singleBlocks[i].cacheBlockHeapSize();
// Add and get multi blocks
cache.cacheBlock(multiBlocks[i].blockName, multiBlocks[i]);
cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
expectedCacheSize += multiBlocks[i].cacheBlockHeapSize();
cache.getBlock(multiBlocks[i].blockName, true);
cache.getBlock(multiBlocks[i].cacheKey, true);
// Add memory blocks as such
cache.cacheBlock(memoryBlocks[i].blockName, memoryBlocks[i], true);
cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
expectedCacheSize += memoryBlocks[i].cacheBlockHeapSize();
}
@ -306,77 +306,77 @@ public class TestLruBlockCache {
assertEquals(expectedCacheSize, cache.heapSize());
// Insert a single block, oldest single should be evicted
cache.cacheBlock(singleBlocks[3].blockName, singleBlocks[3]);
cache.cacheBlock(singleBlocks[3].cacheKey, singleBlocks[3]);
// Single eviction, one thing evicted
assertEquals(1, cache.getEvictionCount());
assertEquals(1, cache.getEvictedCount());
// Verify oldest single block is the one evicted
assertEquals(null, cache.getBlock(singleBlocks[0].blockName, true));
assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true));
// Change the oldest remaining single block to a multi
cache.getBlock(singleBlocks[1].blockName, true);
cache.getBlock(singleBlocks[1].cacheKey, true);
// Insert another single block
cache.cacheBlock(singleBlocks[4].blockName, singleBlocks[4]);
cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]);
// Two evictions, two evicted.
assertEquals(2, cache.getEvictionCount());
assertEquals(2, cache.getEvictedCount());
// Oldest multi block should be evicted now
assertEquals(null, cache.getBlock(multiBlocks[0].blockName, true));
assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true));
// Insert another memory block
cache.cacheBlock(memoryBlocks[3].blockName, memoryBlocks[3], true);
cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
// Three evictions, three evicted.
assertEquals(3, cache.getEvictionCount());
assertEquals(3, cache.getEvictedCount());
// Oldest memory block should be evicted now
assertEquals(null, cache.getBlock(memoryBlocks[0].blockName, true));
assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true));
// Add a block that is twice as big (should force two evictions)
CachedItem [] bigBlocks = generateFixedBlocks(3, blockSize*3, "big");
cache.cacheBlock(bigBlocks[0].blockName, bigBlocks[0]);
cache.cacheBlock(bigBlocks[0].cacheKey, bigBlocks[0]);
// Four evictions, six evicted (inserted block 3X size, expect +3 evicted)
assertEquals(4, cache.getEvictionCount());
assertEquals(6, cache.getEvictedCount());
// Expect three remaining singles to be evicted
assertEquals(null, cache.getBlock(singleBlocks[2].blockName, true));
assertEquals(null, cache.getBlock(singleBlocks[3].blockName, true));
assertEquals(null, cache.getBlock(singleBlocks[4].blockName, true));
assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true));
assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true));
assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true));
// Make the big block a multi block
cache.getBlock(bigBlocks[0].blockName, true);
cache.getBlock(bigBlocks[0].cacheKey, true);
// Cache another single big block
cache.cacheBlock(bigBlocks[1].blockName, bigBlocks[1]);
cache.cacheBlock(bigBlocks[1].cacheKey, bigBlocks[1]);
// Five evictions, nine evicted (3 new)
assertEquals(5, cache.getEvictionCount());
assertEquals(9, cache.getEvictedCount());
// Expect three remaining multis to be evicted
assertEquals(null, cache.getBlock(singleBlocks[1].blockName, true));
assertEquals(null, cache.getBlock(multiBlocks[1].blockName, true));
assertEquals(null, cache.getBlock(multiBlocks[2].blockName, true));
assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true));
assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true));
assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true));
// Cache a big memory block
cache.cacheBlock(bigBlocks[2].blockName, bigBlocks[2], true);
cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true);
// Six evictions, twelve evicted (3 new)
assertEquals(6, cache.getEvictionCount());
assertEquals(12, cache.getEvictedCount());
// Expect three remaining in-memory to be evicted
assertEquals(null, cache.getBlock(memoryBlocks[1].blockName, true));
assertEquals(null, cache.getBlock(memoryBlocks[2].blockName, true));
assertEquals(null, cache.getBlock(memoryBlocks[3].blockName, true));
assertEquals(null, cache.getBlock(memoryBlocks[1].cacheKey, true));
assertEquals(null, cache.getBlock(memoryBlocks[2].cacheKey, true));
assertEquals(null, cache.getBlock(memoryBlocks[3].cacheKey, true));
}
@ -403,13 +403,13 @@ public class TestLruBlockCache {
// Add 5 multi blocks
for (CachedItem block : multiBlocks) {
cache.cacheBlock(block.blockName, block);
cache.getBlock(block.blockName, true);
cache.cacheBlock(block.cacheKey, block);
cache.getBlock(block.cacheKey, true);
}
// Add 5 single blocks
for(int i=0;i<5;i++) {
cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i]);
cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
}
// An eviction ran
@ -419,10 +419,10 @@ public class TestLruBlockCache {
assertEquals(4, cache.getEvictedCount());
// Should have been taken off equally from single and multi
assertEquals(null, cache.getBlock(singleBlocks[0].blockName, true));
assertEquals(null, cache.getBlock(singleBlocks[1].blockName, true));
assertEquals(null, cache.getBlock(multiBlocks[0].blockName, true));
assertEquals(null, cache.getBlock(multiBlocks[1].blockName, true));
assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true));
assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true));
assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true));
assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true));
// Let's keep "scanning" by adding single blocks. From here on we only
// expect evictions from the single bucket.
@ -432,7 +432,7 @@ public class TestLruBlockCache {
// 12 more evicted.
for(int i=5;i<18;i++) {
cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i]);
cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
}
// 4 total evictions, 16 total evicted
@ -469,14 +469,14 @@ public class TestLruBlockCache {
for(int i=0;i<10;i++) {
// Just add single blocks
cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i]);
cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]);
// Add and get multi blocks
cache.cacheBlock(multiBlocks[i].blockName, multiBlocks[i]);
cache.getBlock(multiBlocks[i].blockName, true);
cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]);
cache.getBlock(multiBlocks[i].cacheKey, true);
// Add memory blocks as such
cache.cacheBlock(memoryBlocks[i].blockName, memoryBlocks[i], true);
cache.cacheBlock(memoryBlocks[i].cacheKey, memoryBlocks[i], true);
}
// Do not expect any evictions yet
@ -493,16 +493,16 @@ public class TestLruBlockCache {
// And the oldest 5 blocks from each category should be gone
for(int i=0;i<5;i++) {
assertEquals(null, cache.getBlock(singleBlocks[i].blockName, true));
assertEquals(null, cache.getBlock(multiBlocks[i].blockName, true));
assertEquals(null, cache.getBlock(memoryBlocks[i].blockName, true));
assertEquals(null, cache.getBlock(singleBlocks[i].cacheKey, true));
assertEquals(null, cache.getBlock(multiBlocks[i].cacheKey, true));
assertEquals(null, cache.getBlock(memoryBlocks[i].cacheKey, true));
}
// And the newest 5 blocks should still be accessible
for(int i=5;i<10;i++) {
assertEquals(singleBlocks[i], cache.getBlock(singleBlocks[i].blockName, true));
assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].blockName, true));
assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].blockName, true));
assertEquals(singleBlocks[i], cache.getBlock(singleBlocks[i].cacheKey, true));
assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].cacheKey, true));
assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].cacheKey, true));
}
}
@ -643,11 +643,11 @@ public class TestLruBlockCache {
}
private static class CachedItem implements Cacheable {
String blockName;
BlockCacheKey cacheKey;
int size;
CachedItem(String blockName, int size) {
this.blockName = blockName;
this.cacheKey = new BlockCacheKey(blockName, 0);
this.size = size;
}
@ -660,7 +660,7 @@ public class TestLruBlockCache {
/** Size of the cache block holding this item. Used for verification. */
public long cacheBlockHeapSize() {
return CachedBlock.PER_BLOCK_OVERHEAD
+ ClassSize.align(2 * blockName.length())
+ ClassSize.align(cacheKey.heapSize())
+ ClassSize.align(size);
}