HBASE-22531 The HFileReaderImpl#shouldUseHeap return the incorrect true when disabled BlockCache (#304)

This commit is contained in:
openinx 2019-06-14 09:41:11 +08:00 committed by huzheng
parent d64c3a2611
commit ccabbdd40a
2 changed files with 26 additions and 1 deletion

View File

@@ -1419,7 +1419,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    * boolean, boolean)
    */
   private boolean shouldUseHeap(BlockType expectedBlockType) {
-    if (cacheConf.getBlockCache() == null) {
+    if (!cacheConf.getBlockCache().isPresent()) {
       return false;
     } else if (!cacheConf.isCombinedBlockCache()) {
       // Block to cache in LruBlockCache must be an heap one. So just allocate block memory from

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
+import static org.apache.hadoop.hbase.HConstants.HFILE_BLOCK_CACHE_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MAX_BUFFER_COUNT_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MIN_ALLOCATE_SIZE_KEY;
@@ -422,4 +423,28 @@ public class TestHFileScannerImplReferenceCount {
     Assert.assertNull(scanner.curBlock);
     Assert.assertTrue(scanner.prevBlocks.isEmpty());
   }
@Test
public void testDisabledBlockCache() throws Exception {
  writeHFile(conf, fs, hfilePath, Algorithm.NONE, DataBlockEncoding.NONE, CELL_COUNT);
  // Shrink the on-heap cache fraction to zero so that no block cache is created at all.
  conf.setFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  BlockCache blockCache = BlockCacheFactory.createBlockCache(conf);
  Assert.assertNull(blockCache);
  CacheConfig cacheConfig = new CacheConfig(conf, null, blockCache, allocator);
  // No bucket cache was configured, so this cannot be a CombinedBlockCache.
  Assert.assertFalse(cacheConfig.isCombinedBlockCache());
  HFile.Reader hfileReader = HFile.createReader(fs, hfilePath, cacheConfig, true, conf);
  Assert.assertTrue(hfileReader instanceof HFileReaderImpl);
  // The written HFile carries a 16-level data block index tree.
  Assert.assertEquals(16, hfileReader.getTrailer().getNumDataIndexLevels());
  HFileBlock firstBlock = hfileReader.getDataBlockIndexReader()
      .loadDataBlockWithScanInfo(firstCell, null, true, true, false, DataBlockEncoding.NONE)
      .getHFileBlock();
  // With the block cache disabled, the block should be served from the shared
  // (pooled, off-heap capable) allocator rather than being copied onto the heap.
  Assert.assertTrue(firstBlock.isSharedMem());
  Assert.assertTrue(firstBlock instanceof SharedMemHFileBlock);
  Assert.assertEquals(1, firstBlock.refCnt());
  Assert.assertTrue(firstBlock.release());
}
 }