From fb934909a8621d9b0242246d4498be0812a1d736 Mon Sep 17 00:00:00 2001
From: larsh
Date: Tue, 13 Nov 2012 06:54:02 +0000
Subject: [PATCH] HBASE-5898 Consider double-checked locking for block cache lock (Todd, Elliot, LarsH)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1408620 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hbase/io/hfile/AbstractHFileReader.java   |   4 -
 .../hadoop/hbase/io/hfile/BlockCache.java     |   5 +-
 .../hbase/io/hfile/DoubleBlockCache.java      |   8 +-
 .../hadoop/hbase/io/hfile/HFileReaderV1.java  |  10 +-
 .../hadoop/hbase/io/hfile/HFileReaderV2.java  | 122 +++++++++---
 .../hadoop/hbase/io/hfile/LruBlockCache.java  |   7 +-
 .../hbase/io/hfile/SimpleBlockCache.java      |   2 +-
 .../hbase/io/hfile/slab/SingleSizeCache.java  |   4 +-
 .../hadoop/hbase/io/hfile/slab/SlabCache.java |  12 +-
 .../hadoop/hbase/io/hfile/CacheTestUtils.java |  14 +-
 .../hbase/io/hfile/TestCacheOnWrite.java      |   2 +-
 .../io/hfile/TestHFileDataBlockEncoder.java   |   2 +-
 .../hbase/io/hfile/TestLruBlockCache.java     |  74 +++++------
 .../TestCacheOnWriteInSchema.java             |   2 +-
 14 files changed, 133 insertions(+), 135 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
index 26ce6d12b6f..e96a70f1281 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
@@ -90,10 +90,6 @@ public abstract class AbstractHFileReader implements HFile.Reader {
   /** Block cache configuration. */
   protected final CacheConfig cacheConf;
 
-  protected AtomicLong cacheHits = new AtomicLong();
-  protected AtomicLong blockLoads = new AtomicLong();
-  protected AtomicLong metaLoads = new AtomicLong();
-
   /** Path of file */
   protected final Path path;
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 6f6b887fd28..90c269f5393 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -49,9 +49,12 @@ public interface BlockCache {
    * Fetch block from cache.
    * @param cacheKey Block to fetch.
    * @param caching Whether this request has caching enabled (used for stats)
+   * @param repeat Whether this is a repeat lookup for the same block
+   *          (used to avoid double counting cache misses when doing double-checked locking);
+   *          see {@link HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)}
    * @return Block or null if block is not in the cache.
    */
-  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching);
+  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat);
 
   /**
    * Evict block from cache.
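
Every cache below implements the new three-argument getBlock the same way: a hit is always counted, but a miss is counted only when repeat is false, because the locked re-check of a double-checked read has already had its miss recorded by the first, lock-free probe. For orientation, a minimal sketch of a getBlock honoring that contract follows; the class name and backing map are illustrative only and not part of this patch, while BlockCacheKey, Cacheable, and CacheStats are the existing HBase types (assuming CacheStats is directly constructible):

import java.util.concurrent.ConcurrentHashMap;

// Illustrative sketch, not part of this patch: the smallest getBlock
// that honors the statistics contract of the new repeat flag.
public class RepeatAwareCacheSketch {
  private final ConcurrentHashMap<BlockCacheKey, Cacheable> map =
      new ConcurrentHashMap<BlockCacheKey, Cacheable>();
  private final CacheStats stats = new CacheStats();

  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {
    Cacheable block = map.get(cacheKey);
    if (block == null) {
      // A repeat lookup is the locked re-check of a miss that was already
      // counted by the first, lock-free probe; don't count it twice.
      if (!repeat) {
        stats.miss(caching);
      }
      return null;
    }
    stats.hit(caching); // hits are counted on either probe
    return block;
  }
}
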
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
index 90551caba79..f89c364cbb9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
@@ -91,14 +91,14 @@ public class DoubleBlockCache implements BlockCache, HeapSize {
   }
 
   @Override
-  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching) {
+  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {
     Cacheable cachedBlock;
 
-    if ((cachedBlock = onHeapCache.getBlock(cacheKey, caching)) != null) {
+    if ((cachedBlock = onHeapCache.getBlock(cacheKey, caching, repeat)) != null) {
       stats.hit(caching);
       return cachedBlock;
-    } else if ((cachedBlock = offHeapCache.getBlock(cacheKey, caching)) != null) {
+    } else if ((cachedBlock = offHeapCache.getBlock(cacheKey, caching, repeat)) != null) {
       if (caching) {
         onHeapCache.cacheBlock(cacheKey, cachedBlock);
       }
@@ -106,7 +106,7 @@ public class DoubleBlockCache implements BlockCache, HeapSize {
       return cachedBlock;
     }
 
-    stats.miss(caching);
+    if (!repeat) stats.miss(caching);
     return null;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
index 436d0c4f1d1..629b3461ac6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
@@ -225,15 +225,12 @@ public class HFileReaderV1 extends AbstractHFileReader {
 
     // Per meta key from any given file, synchronize reads for said block
     synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
-      metaLoads.incrementAndGet();
-
       // Check cache for block. If found return.
       if (cacheConf.isBlockCacheEnabled()) {
         HFileBlock cachedBlock =
           (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
-              cacheConf.shouldCacheBlockOnRead(effectiveCategory));
+              cacheConf.shouldCacheBlockOnRead(effectiveCategory), false);
         if (cachedBlock != null) {
-          cacheHits.incrementAndGet();
           return cachedBlock.getBufferWithoutHeader();
         }
         // Cache Miss, please load.
@@ -285,15 +282,12 @@ public class HFileReaderV1 extends AbstractHFileReader {
     // the other choice is to duplicate work (which the cache would prevent you
     // from doing).
     synchronized (dataBlockIndexReader.getRootBlockKey(block)) {
-      blockLoads.incrementAndGet();
-
       // Check cache for block. If found return.
       if (cacheConf.isBlockCacheEnabled()) {
         HFileBlock cachedBlock =
           (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
-            cacheConf.shouldCacheDataOnRead());
+            cacheConf.shouldCacheDataOnRead(), false);
         if (cachedBlock != null) {
-          cacheHits.incrementAndGet();
           return cachedBlock.getBufferWithoutHeader();
         }
         // Carry on, please load.
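
The heart of the change is next, in HFileReaderV2.readBlock. Before this patch, the reader took the per-offset IdLock before its first cache probe, so even cache hits serialized on the lock. Now the first probe is lock-free; only on a miss does the reader take the lock and probe once more before reading from disk. Stripped of HBase specifics, the loop has the following shape. This is a sketch only, assuming an offsetLock field of the IdLock type, a cache field implementing BlockCache, and a hypothetical readFromDisk helper standing in for the real fsBlockReader call; all names are illustrative:

// Sketch of the double-checked locking read path (names illustrative).
Cacheable readBlockSketch(BlockCacheKey key, long offset, boolean caching)
    throws IOException {
  boolean useLock = false;
  IdLock.Entry lockEntry = null;
  try {
    while (true) {
      if (useLock) {
        // Second pass: serialize would-be loaders of this offset.
        lockEntry = offsetLock.getLockEntry(offset);
      }
      // repeat == useLock, so the locked re-check never double counts a miss.
      Cacheable cached = cache.getBlock(key, caching, useLock);
      if (cached != null) {
        return cached; // hit on either the lock-free or the locked probe
      }
      if (!useLock) {
        useLock = true; // fast-path miss: take the lock and check again
        continue;
      }
      // Still absent while holding the lock: this thread does the disk read,
      // and threads parked on the lock will hit the cache instead.
      Cacheable loaded = readFromDisk(offset); // placeholder for fsBlockReader
      cache.cacheBlock(key, loaded);
      return loaded;
    }
  } finally {
    if (lockEntry != null) {
      offsetLock.releaseLockEntry(lockEntry);
    }
  }
}

Because the lock is keyed by offset, readers of different blocks never contend, and concurrent misses on the same block still perform only one disk read; the double check is what lets the common hit path skip the lock entirely.
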
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index 71e4d09cb6c..6eecd4265c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.IdLock;
@@ -211,8 +210,6 @@ public class HFileReaderV2 extends AbstractHFileReader {
     // is OK to do for meta blocks because the meta block index is always
     // single-level.
     synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
-      metaLoads.incrementAndGet();
-
       // Check cache for block. If found return.
       long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
       BlockCacheKey cacheKey = new BlockCacheKey(name, metaBlockOffset,
@@ -221,11 +218,10 @@ public class HFileReaderV2 extends AbstractHFileReader {
       cacheBlock &= cacheConf.shouldCacheDataOnRead();
       if (cacheConf.isBlockCacheEnabled()) {
         HFileBlock cachedBlock =
-          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
+          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock, false);
         if (cachedBlock != null) {
           // Return a distinct 'shallow copy' of the block,
           // so pos does not get messed by the scanner
-          cacheHits.incrementAndGet();
           return cachedBlock.getBufferWithoutHeader();
         }
         // Cache Miss, please load.
@@ -286,66 +282,72 @@ public class HFileReaderV2 extends AbstractHFileReader {
         new BlockCacheKey(name, dataBlockOffset,
           dataBlockEncoder.getEffectiveEncodingInCache(isCompaction),
           expectedBlockType);
-    IdLock.Entry lockEntry = offsetLock.getLockEntry(dataBlockOffset);
+
+    boolean useLock = false;
+    IdLock.Entry lockEntry = null;
     try {
-      blockLoads.incrementAndGet();
+      while (true) {
 
-      // Check cache for block. If found return.
-      if (cacheConf.isBlockCacheEnabled()) {
-        HFileBlock cachedBlock = (HFileBlock)
-            cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
-        if (cachedBlock != null) {
-          BlockCategory blockCategory =
-              cachedBlock.getBlockType().getCategory();
-          cacheHits.incrementAndGet();
-
-
-          if (cachedBlock.getBlockType() == BlockType.DATA) {
-            HFile.dataBlockReadCnt.incrementAndGet();
-          }
-
-          validateBlockType(cachedBlock, expectedBlockType);
-
-          // Validate encoding type for encoded blocks. We include encoding
-          // type in the cache key, and we expect it to match on a cache hit.
-          if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA &&
-              cachedBlock.getDataBlockEncoding() !=
-              dataBlockEncoder.getEncodingInCache()) {
-            throw new IOException("Cached block under key " + cacheKey + " " +
-                "has wrong encoding: " + cachedBlock.getDataBlockEncoding() +
-                " (expected: " + dataBlockEncoder.getEncodingInCache() + ")");
-          }
-          return cachedBlock;
+        if (useLock) {
+          lockEntry = offsetLock.getLockEntry(dataBlockOffset);
         }
-        // Carry on, please load.
+
+        // Check cache for block. If found return.
+        if (cacheConf.isBlockCacheEnabled()) {
+          // Try and get the block from the block cache. If the useLock variable is true then this
+          // is the second time through the loop and it should not be counted as a block cache miss.
+          HFileBlock cachedBlock = (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
+              cacheBlock, useLock);
+          if (cachedBlock != null) {
+            if (cachedBlock.getBlockType() == BlockType.DATA) {
+              HFile.dataBlockReadCnt.incrementAndGet();
+            }
+
+            validateBlockType(cachedBlock, expectedBlockType);
+
+            // Validate encoding type for encoded blocks. We include encoding
+            // type in the cache key, and we expect it to match on a cache hit.
+            if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA
+                && cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getEncodingInCache()) {
+              throw new IOException("Cached block under key " + cacheKey + " "
+                  + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: "
+                  + dataBlockEncoder.getEncodingInCache() + ")");
+            }
+            return cachedBlock;
+          }
+          // Carry on, please load.
+        }
+        if (!useLock) {
+          // check cache again with lock
+          useLock = true;
+          continue;
+        }
+
+        // Load block from filesystem.
+        long startTimeNs = System.nanoTime();
+        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
+            pread);
+        hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock, isCompaction);
+        validateBlockType(hfileBlock, expectedBlockType);
+
+        final long delta = System.nanoTime() - startTimeNs;
+        HFile.offerReadLatency(delta, pread);
+
+        // Cache the block if necessary
+        if (cacheBlock && cacheConf.shouldCacheBlockOnRead(hfileBlock.getBlockType().getCategory())) {
+          cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory());
+        }
+
+        if (hfileBlock.getBlockType() == BlockType.DATA) {
+          HFile.dataBlockReadCnt.incrementAndGet();
+        }
+
+        return hfileBlock;
       }
-
-      // Load block from filesystem.
-      long startTimeNs = System.nanoTime();
-      HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
-          onDiskBlockSize, -1, pread);
-      hfileBlock = dataBlockEncoder.diskToCacheFormat(hfileBlock,
-          isCompaction);
-      validateBlockType(hfileBlock, expectedBlockType);
-      BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();
-
-      final long delta = System.nanoTime() - startTimeNs;
-      HFile.offerReadLatency(delta, pread);
-
-      // Cache the block if necessary
-      if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
-          hfileBlock.getBlockType().getCategory())) {
-        cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
-            cacheConf.isInMemory());
-      }
-
-      if (hfileBlock.getBlockType() == BlockType.DATA) {
-        HFile.dataBlockReadCnt.incrementAndGet();
-      }
-
-      return hfileBlock;
     } finally {
-      offsetLock.releaseLockEntry(lockEntry);
+      if (lockEntry != null) {
+        offsetLock.releaseLockEntry(lockEntry);
+      }
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 74b6212df4c..5ea08a89368 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -332,13 +332,16 @@ public class LruBlockCache implements BlockCache, HeapSize {
    * Get the buffer of the block with the specified name.
   * @param cacheKey block's cache key
   * @param caching true if the caller caches blocks on cache misses
+   * @param repeat Whether this is a repeat lookup for the same block
+   *          (used to avoid double counting cache misses when doing double-checked locking);
+   *          see {@link HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)}
   * @return buffer of specified cache key, or null if not in cache
   */
  @Override
-  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching) {
+  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {
    CachedBlock cb = map.get(cacheKey);
    if(cb == null) {
-      stats.miss(caching);
+      if (!repeat) stats.miss(caching);
      return null;
    }
    stats.hit(caching);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
index 713071079d6..62533120b8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
@@ -69,7 +69,7 @@ public class SimpleBlockCache implements BlockCache {
    return cache.size();
  }
 
-  public synchronized Cacheable getBlock(BlockCacheKey cacheKey, boolean caching) {
+  public synchronized Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {
    processQueue(); // clear out some crap.
    Ref ref = cache.get(cacheKey);
    if (ref == null)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
index 25276bb0275..3b2d85eb621 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
@@ -153,10 +153,10 @@ public class SingleSizeCache implements BlockCache, HeapSize {
  }
 
  @Override
-  public Cacheable getBlock(BlockCacheKey key, boolean caching) {
+  public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat) {
    CacheablePair contentBlock = backingMap.get(key);
    if (contentBlock == null) {
-      stats.miss(caching);
+      if (!repeat) stats.miss(caching);
      return null;
    }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
index 1bd7adb49e6..0711fa61571 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
@@ -229,23 +229,23 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
 
  /**
   * Get the buffer of the block with the specified name.
-   *
-   * @param key
   * @param caching
+   * @param key block's cache key
+   * @param repeat Whether this is a repeat lookup for the same block (used to avoid double counting cache misses)
   * @return buffer of specified block name, or null if not in cache
   */
-  public Cacheable getBlock(BlockCacheKey key, boolean caching) {
+  public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat) {
    SingleSizeCache cachedBlock = backingStore.get(key);
    if (cachedBlock == null) {
-      stats.miss(caching);
+      if (!repeat) stats.miss(caching);
      return null;
    }
 
-    Cacheable contentBlock = cachedBlock.getBlock(key, caching);
+    Cacheable contentBlock = cachedBlock.getBlock(key, caching, false);
 
    if (contentBlock != null) {
      stats.hit(caching);
-    } else {
+    } else if (!repeat) {
      stats.miss(caching);
    }
    return contentBlock;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index 6eb26baecaf..1cad72d5541 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -90,12 +90,12 @@ public class CacheTestUtils {
        }
        toBeTested.cacheBlock(ourBlock.blockName, ourBlock.block);
        Cacheable retrievedBlock = toBeTested.getBlock(ourBlock.blockName,
-            false);
+            false, false);
        if (retrievedBlock != null) {
          assertEquals(ourBlock.block, retrievedBlock);
          toBeTested.evictBlock(ourBlock.blockName);
          hits.incrementAndGet();
-          assertNull(toBeTested.getBlock(ourBlock.blockName, false));
+          assertNull(toBeTested.getBlock(ourBlock.blockName, false, false));
        } else {
          miss.incrementAndGet();
        }
@@ -123,7 +123,7 @@ public class CacheTestUtils {
    HFileBlockPair[] blocks = generateHFileBlocks(numBlocks, blockSize);
    // Confirm empty
    for (HFileBlockPair block : blocks) {
-      assertNull(toBeTested.getBlock(block.blockName, true));
+      assertNull(toBeTested.getBlock(block.blockName, true, false));
    }
 
    // Add blocks
@@ -136,7 +136,7 @@ public class CacheTestUtils {
 
    // MapMaker makes no guarantees when it will evict, so neither can we.
    for (HFileBlockPair block : blocks) {
-      HFileBlock buf = (HFileBlock) toBeTested.getBlock(block.blockName, true);
+      HFileBlock buf = (HFileBlock) toBeTested.getBlock(block.blockName, true, false);
      if (buf != null) {
        assertEquals(block.block, buf);
      }
    }
@@ -147,7 +147,7 @@ public class CacheTestUtils {
 
    for (HFileBlockPair block : blocks) {
      try {
-        if (toBeTested.getBlock(block.blockName, true) != null) {
+        if (toBeTested.getBlock(block.blockName, true, false) != null) {
          toBeTested.cacheBlock(block.blockName, block.block);
          fail("Cache should not allow re-caching a block");
        }
@@ -177,7 +177,7 @@ public class CacheTestUtils {
        @Override
        public void doAnAction() throws Exception {
          ByteArrayCacheable returned = (ByteArrayCacheable) toBeTested
-              .getBlock(key, false);
+              .getBlock(key, false, false);
          assertArrayEquals(buf, returned.buf);
          totalQueries.incrementAndGet();
        }
@@ -216,7 +216,7 @@ public class CacheTestUtils {
          final ByteArrayCacheable bac = new ByteArrayCacheable(buf);
 
          ByteArrayCacheable gotBack = (ByteArrayCacheable) toBeTested
-              .getBlock(key, true);
+              .getBlock(key, true, false);
          if (gotBack != null) {
            assertArrayEquals(gotBack.buf, bac.buf);
          } else {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index d4995deb386..b5db35e1f1e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -239,7 +239,7 @@ public class TestCacheOnWrite {
        false, null);
    BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset,
        encodingInCache, block.getBlockType());
-    boolean isCached = blockCache.getBlock(blockCacheKey, true) != null;
+    boolean isCached = blockCache.getBlock(blockCacheKey, true, false) != null;
    boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
    if (shouldBeCached != isCached) {
      throw new AssertionError(
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
index 141ef10ca39..c24acc48736 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
@@ -87,7 +87,7 @@ public class TestHFileDataBlockEncoder {
    BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
    blockCache.cacheBlock(cacheKey, cacheBlock);
 
-    HeapSize heapSize = blockCache.getBlock(cacheKey, false);
+    HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
    assertTrue(heapSize instanceof HFileBlock);
 
    HFileBlock returnedBlock = (HFileBlock) heapSize;;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index 290ba4a994d..c03a378428d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -99,7 +99,7 @@ public class TestLruBlockCache {
 
    // Confirm empty
    for (CachedItem block : blocks) {
-      assertTrue(cache.getBlock(block.cacheKey, true) == null);
+      assertTrue(cache.getBlock(block.cacheKey, true, false) == null);
    }
 
    // Add blocks
@@ -113,7 +113,7 @@ public class TestLruBlockCache {
 
    // Check if all blocks are properly cached and retrieved
    for (CachedItem block : blocks) {
-      HeapSize buf = cache.getBlock(block.cacheKey, true);
+      HeapSize buf = cache.getBlock(block.cacheKey, true, false);
      assertTrue(buf != null);
      assertEquals(buf.heapSize(), block.heapSize());
    }
@@ -133,7 +133,7 @@ public class TestLruBlockCache {
 
    // Check if all blocks are properly cached and retrieved
    for (CachedItem block : blocks) {
-      HeapSize buf = cache.getBlock(block.cacheKey, true);
+      HeapSize buf = cache.getBlock(block.cacheKey, true, false);
      assertTrue(buf != null);
      assertEquals(buf.heapSize(), block.heapSize());
    }
@@ -178,9 +178,9 @@ public class TestLruBlockCache {
        (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
 
    // All blocks except block 0 should be in the cache
-    assertTrue(cache.getBlock(blocks[0].cacheKey, true) == null);
+    assertTrue(cache.getBlock(blocks[0].cacheKey, true, false) == null);
    for(int i=1;i