From 4f1b5ab54e1f486c254e1caeee2ba05e82b4b1ef Mon Sep 17 00:00:00 2001
From: openinx
Date: Fri, 14 Jun 2019 09:41:11 +0800
Subject: [PATCH] HBASE-22531 HFileReaderImpl#shouldUseHeap returns an incorrect true when the BlockCache is disabled (#304)

---
 .../hbase/io/hfile/HFileReaderImpl.java       |  2 +-
 .../TestHFileScannerImplReferenceCount.java   | 25 +++++++++++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 0dae13cde97..9cef9c0c4dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1419,7 +1419,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    * boolean, boolean)
    */
   private boolean shouldUseHeap(BlockType expectedBlockType) {
-    if (cacheConf.getBlockCache() == null) {
+    if (!cacheConf.getBlockCache().isPresent()) {
       return false;
     } else if (!cacheConf.isCombinedBlockCache()) {
       // Block to cache in LruBlockCache must be an heap one. So just allocate block memory from
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
index 87dd29e5de4..dd9a1c80bed 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
+import static org.apache.hadoop.hbase.HConstants.HFILE_BLOCK_CACHE_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MAX_BUFFER_COUNT_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MIN_ALLOCATE_SIZE_KEY;
@@ -422,4 +423,28 @@ public class TestHFileScannerImplReferenceCount {
     Assert.assertNull(scanner.curBlock);
     Assert.assertTrue(scanner.prevBlocks.isEmpty());
   }
+
+  @Test
+  public void testDisabledBlockCache() throws Exception {
+    writeHFile(conf, fs, hfilePath, Algorithm.NONE, DataBlockEncoding.NONE, CELL_COUNT);
+    // Disable the block cache by setting its heap fraction to zero.
+    conf.setFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
+    BlockCache defaultBC = BlockCacheFactory.createBlockCache(conf);
+    Assert.assertNull(defaultBC);
+    CacheConfig cacheConfig = new CacheConfig(conf, null, defaultBC, allocator);
+    Assert.assertFalse(cacheConfig.isCombinedBlockCache()); // Not combined: the cache is disabled.
+    HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConfig, true, conf);
+    Assert.assertTrue(reader instanceof HFileReaderImpl);
+    // We've built an HFile tree with 16 data-index levels.
+    Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels());
+
+    HFileBlock block1 = reader.getDataBlockIndexReader()
+        .loadDataBlockWithScanInfo(firstCell, null, true, true, false, DataBlockEncoding.NONE)
+        .getHFileBlock();
+
+    Assert.assertTrue(block1.isSharedMem());
+    Assert.assertTrue(block1 instanceof SharedMemHFileBlock);
+    Assert.assertEquals(1, block1.refCnt());
+    Assert.assertTrue(block1.release());
+  }
 }
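
Note for reviewers: the root cause is that in this branch CacheConfig#getBlockCache() returns an Optional<BlockCache> which is empty, but never null, when the block cache is disabled. The old `== null` guard therefore could not fire, and shouldUseHeap kept answering true. Below is a minimal standalone sketch of the old vs. fixed guard; the ShouldUseHeapSketch class and its getBlockCache stand-in are invented for illustration and are not the real CacheConfig API.

import java.util.Optional;

public class ShouldUseHeapSketch {

  // Stand-in for CacheConfig#getBlockCache(): returns an Optional that is
  // empty -- but never null -- when the block cache is disabled.
  static Optional<Object> getBlockCache(boolean cacheEnabled) {
    return cacheEnabled ? Optional.of(new Object()) : Optional.empty();
  }

  public static void main(String[] args) {
    Optional<Object> blockCache = getBlockCache(false);

    // Old guard: an Optional reference is never null, so this stayed false
    // and shouldUseHeap fell through to the on-heap allocation branches.
    System.out.println("old '== null' guard fires: " + (blockCache == null));

    // Fixed guard: asks the Optional whether it actually holds a cache.
    System.out.println("new '!isPresent()' guard fires: " + !blockCache.isPresent());
  }
}

Running the sketch prints false for the old guard and true for the fixed one, which is why testDisabledBlockCache now expects the block to come from the allocator pool as an off-heap SharedMemHFileBlock.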