HBASE-22211 Remove the returnBlock method because we can just call HFileBlock#release directly

huzheng 2019-04-22 15:57:12 +08:00
parent 48aca4db30
commit 77cef7490b
13 changed files with 37 additions and 102 deletions
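In one line, the shape of the change, shown as a minimal sketch with stub types (an illustration, not HBase's real classes): call sites stop routing blocks back through BlockCache#returnBlock or the reader's returnBlock, and instead decrement the block's reference count directly.

// Stub stand-in for HFileBlock/Cacheable, for illustration only.
interface RefCounted {
  boolean release(); // decrement refCnt; free the backing buffers when it reaches 0
}

class CallSiteSketch {
  static void useBlock(RefCounted block) {
    try {
      // ... read cells, index entries, or bloom bits from the block ...
    } finally {
      // was: blockCache.returnBlock(cacheKey, block) or reader.returnBlock(block)
      block.release();
    }
  }
}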

BlockCache.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.io.hfile;
import java.util.Iterator;
import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
/**
* Block cache interface. Anything that implements the {@link Cacheable}
@@ -133,27 +132,4 @@ public interface BlockCache extends Iterable<CachedBlock> {
* @return The list of sub blockcaches that make up this one; returns null if no sub caches.
*/
BlockCache [] getBlockCaches();
-/**
- * Called when the scanner using the block decides to decrease refCnt of block and return the
- * block once its usage is over. This API should be called after the block is used, failing to do
- * so may have adverse effects by preventing the blocks from being evicted because of which it
- * will prevent new hot blocks from getting added to the block cache. The implementation of the
- * BlockCache will decide on what to be done with the block based on the memory type of the
- * block's {@link MemoryType}. <br>
- * <br>
- * Note that if two handlers read from backingMap in off-heap BucketCache at the same time, BC
- * will return two ByteBuff, which reference to the same memory area in buckets, but wrapped by
- * two different ByteBuff, and each of them has its own independent refCnt(=1). so here, if
- * returnBlock with different blocks in two handlers, it has no problem. but if both the two
- * handlers returnBlock with the same block, then the refCnt exception will happen here. <br>
- * TODO let's unify the ByteBuff's refCnt and BucketEntry's refCnt in HBASE-21957, after that
- * we'll just call the Cacheable#release instead of calling release in some path and calling
- * returnBlock in other paths in current version.
- * @param cacheKey the cache key of the block
- * @param block the hfileblock to be returned
- */
-default void returnBlock(BlockCacheKey cacheKey, Cacheable block) {
-  block.release();
-}
}
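The removed javadoc above is really describing reference-counting semantics. A toy model of those semantics (a deliberately simplified stand-in, not HBase's actual RefCnt class) shows why two handlers releasing the same block trips the refCnt exception the comment warns about, while releasing two distinct ByteBuff wrappers of the same bucket memory (each starting at refCnt 1) is fine:

import java.util.concurrent.atomic.AtomicInteger;

/** Toy ref-counted block; a simplified model, not the real HBase implementation. */
class ToyBlock {
  private final AtomicInteger refCnt = new AtomicInteger(1); // starts at 1 on creation

  boolean release() {
    int cnt = refCnt.decrementAndGet();
    if (cnt == 0) {
      return true; // last reference gone: the backing buffers may be deallocated now
    }
    if (cnt < 0) {
      // the "refCnt exception" case: the same wrapper was released twice
      throw new IllegalStateException("refCnt went negative: double release");
    }
    return false;
  }
}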

BlockCacheUtil.java

@@ -247,8 +247,8 @@ public class BlockCacheUtil {
return false;
}
} finally {
-// return the block since we need to decrement the count
-blockCache.returnBlock(cacheKey, existingBlock);
+// Release this block to decrement the reference count.
+existingBlock.release();
}
}

CombinedBlockCache.java

@@ -379,12 +379,6 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
this.l1Cache.setMaxSize(size);
}
-@Override
-public void returnBlock(BlockCacheKey cacheKey, Cacheable block) {
-  // returnBlock is meaningful for L2 cache alone.
-  this.l2Cache.returnBlock(cacheKey, block);
-}
@VisibleForTesting
public int getRpcRefCount(BlockCacheKey cacheKey) {
return (this.l2Cache instanceof BucketCache)

CompoundBloomFilter.java

@@ -105,8 +105,8 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase
result = BloomFilterUtil.contains(key, keyOffset, keyLength, bloomBuf,
bloomBlock.headerSize(), bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
} finally {
-// After the use return back the block if it was served from a cache.
-reader.returnBlock(bloomBlock);
+// After use, release the block to deallocate its byte buffers.
+bloomBlock.release();
}
if (numPositivesPerChunk != null && result) {
// Update statistics. Only used in unit tests.
@@ -144,10 +144,10 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase
try {
ByteBuff bloomBuf = bloomBlock.getBufferReadOnly();
result = BloomFilterUtil.contains(keyCell, bloomBuf, bloomBlock.headerSize(),
-bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount, type);
+    bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount, type);
} finally {
-// After the use return back the block if it was served from a cache.
-reader.returnBlock(bloomBlock);
+// After use, release the block to deallocate its byte buffers.
+bloomBlock.release();
}
if (numPositivesPerChunk != null && result) {
// Update statistics. Only used in unit tests.

HFile.java

@@ -407,12 +407,6 @@ public class HFile {
final boolean updateCacheMetrics, BlockType expectedBlockType,
DataBlockEncoding expectedDataBlockEncoding)
throws IOException;
-/**
- * Return the given block back to the cache, if it was obtained from cache.
- * @param block Block to be returned.
- */
-void returnBlock(HFileBlock block);
}
/** An interface used by clients to open and iterate an {@link HFile}. */

HFileBlockIndex.java

@@ -381,10 +381,9 @@ public class HFileBlockIndex {
nextIndexedKey = tmpNextIndexKV;
}
} finally {
-if (!dataBlock) {
-  // Return the block immediately if it is not the
-  // data block
-  cachingBlockReader.returnBlock(block);
+if (!dataBlock && block != null) {
+  // Release the block immediately if it is not the data block
+  block.release();
}
}
}
@@ -394,9 +393,11 @@ public class HFileBlockIndex {
// Though we have retrieved a data block, we have found an issue in
// the retrieved data block. Hence we release the block so that its
// ref count can be decremented.
-cachingBlockReader.returnBlock(block);
-throw new IOException("Reached a data block at level " + lookupLevel +
-  " but the number of levels is " + searchTreeLevel);
+if (block != null) {
+  block.release();
+}
+throw new IOException("Reached a data block at level " + lookupLevel
+  + " but the number of levels is " + searchTreeLevel);
}
// set the next indexed key for the current block.
@@ -436,7 +437,7 @@ public class HFileBlockIndex {
byte[] bytes = b.toBytes(keyOffset, keyLen);
targetMidKey = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length);
} finally {
-cachingBlockReader.returnBlock(midLeafBlock);
+midLeafBlock.release();
}
} else {
// The middle of the root-level index.
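Note the null guards this file now adds before release(): the removed returnBlock(HFileBlock) hid that check inside the reader, so call sites must perform it themselves. A hypothetical helper capturing the idiom (an assumption for illustration; the patch simply inlines the check at each call site):

// Hypothetical null-safe release helper, with a stub block type.
final class Blocks {
  interface RefCounted {
    boolean release();
  }

  static void releaseIfNotNull(RefCounted block) {
    if (block != null) {
      block.release(); // only decrement when a block was actually obtained
    }
  }

  private Blocks() {
  }
}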

HFileReaderImpl.java

@@ -291,7 +291,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// Ideally here the readBlock won't find the block in cache. We call this
// readBlock so that block data is read from FS and cached in BC. We must
// release the block here to decrease its reference count.
-returnBlock(block);
+block.release();
}
}
} catch (IOException e) {
@@ -377,20 +377,6 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return fileSize;
}
-@Override
-public void returnBlock(HFileBlock block) {
-  if (block != null) {
-    if (this.cacheConf.getBlockCache().isPresent()) {
-      BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(),
-        block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType());
-      cacheConf.getBlockCache().get().returnBlock(cacheKey, block);
-    } else {
-      // Release the block here, it means the RPC path didn't ref to this block any more.
-      block.release();
-    }
-  }
-}
/**
* @return the first key in the file. May be null if file has no entries. Note
* that this is not the first row key, but rather the byte form of the
@@ -553,23 +539,15 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
this.curBlock = null;
}
-private void returnBlock(HFileBlock block) {
-  if (LOG.isTraceEnabled()) {
-    LOG.trace("Returning the block : " + block);
-  }
-  this.reader.returnBlock(block);
-}
private void returnBlocks(boolean returnAll) {
-for (int i = 0; i < this.prevBlocks.size(); i++) {
-  returnBlock(this.prevBlocks.get(i));
-}
+this.prevBlocks.forEach(HFileBlock::release);
this.prevBlocks.clear();
if (returnAll && this.curBlock != null) {
-returnBlock(this.curBlock);
+this.curBlock.release();
this.curBlock = null;
}
}
@Override
public boolean isSeeked(){
return blockBuffer != null;
@@ -897,7 +875,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// The first key in the current block 'seekToBlock' is greater than the given
// seekBefore key. We will go ahead by reading the next block that satisfies the
// given key. Release the current block before reading the next one.
-reader.returnBlock(seekToBlock);
+seekToBlock.release();
// It is important that we compute and pass onDiskSize to the block
// reader so that it does not have to read the header separately to
// figure out the size. Currently, we do not have a way to do this
@@ -948,7 +926,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
if (block != null && !block.getBlockType().isData()) { // Findbugs: NP_NULL_ON_SOME_PATH
// Whatever block we read, we will be releasing it unless
// it is a data block. Just in case the blocks are non data blocks
-reader.returnBlock(block);
+block.release();
}
} while (!block.getBlockType().isData());
@@ -1325,9 +1303,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
HFileBlock compressedBlock = cachedBlock;
cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);
-// In case of compressed block after unpacking we can return the compressed block
+// In case of compressed block after unpacking we can release the compressed block
if (compressedBlock != cachedBlock) {
-  cache.returnBlock(cacheKey, compressedBlock);
+  compressedBlock.release();
}
}
validateBlockType(cachedBlock, expectedBlockType);
@@ -1361,11 +1339,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// schema definition change.
LOG.info("Evicting cached block with key " + cacheKey
+ " because of a data block encoding mismatch" + "; expected: "
-  + expectedDataBlockEncoding + ", actual: " + actualDataBlockEncoding + ", path="
-  + path);
-// This is an error scenario. so here we need to decrement the
-// count.
-cache.returnBlock(cacheKey, cachedBlock);
+  + expectedDataBlockEncoding + ", actual: " + actualDataBlockEncoding + ", path=" + path);
+// This is an error scenario, so here we need to release the block.
+cachedBlock.release();
cache.evictBlock(cacheKey);
}
return null;
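The returnBlocks rewrite earlier in this file collapses an index-based loop through the deleted private returnBlock helper into a single method reference. A self-contained sketch of the resulting shape, with a stub block type (an illustration, not the real scanner):

import java.util.ArrayList;
import java.util.List;

class ScannerSketch {
  interface RefCounted {
    boolean release();
  }

  private final List<RefCounted> prevBlocks = new ArrayList<>();
  private RefCounted curBlock;

  void returnBlocks(boolean returnAll) {
    prevBlocks.forEach(RefCounted::release); // release every previously retained block
    prevBlocks.clear();                      // drop references so nothing is released twice
    if (returnAll && curBlock != null) {
      curBlock.release();
      curBlock = null;
    }
  }
}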

LruBlockCache.java

@@ -530,8 +530,8 @@ public class LruBlockCache implements FirstLevelBlockCache {
if (result instanceof HFileBlock && ((HFileBlock) result).usesSharedMemory()) {
Cacheable original = result;
result = ((HFileBlock) original).deepCloneOnHeap();
-// deepClone an new one, so need to put the original one back to free it.
-victimHandler.returnBlock(cacheKey, original);
+// deepClone a new one, so we need to release the original one to deallocate it.
+original.release();
}
cacheBlock(cacheKey, result, /* inMemory = */ false);
}
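This hunk is the clone-then-release idiom: when the victim cache hands back a block backed by shared (off-heap) memory, copy it on-heap and immediately release the original so the shared memory can be reclaimed. A sketch with stub types (an assumption for illustration, not the real API):

class CloneSketch {
  interface Block {
    boolean usesSharedMemory(); // backed by shared, e.g. off-heap bucket, memory?
    Block deepCloneOnHeap();    // independent on-heap copy with its own refCnt
    boolean release();          // decrement this block's refCnt
  }

  static Block onHeapCopy(Block result) {
    if (result.usesSharedMemory()) {
      Block original = result;
      result = original.deepCloneOnHeap();
      original.release(); // the caller keeps only the on-heap clone
    }
    return result;
  }
}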

StoreFileReader.java

@@ -456,8 +456,10 @@ public class StoreFileReader {
LOG.error("Bad bloom filter data -- proceeding without", e);
setGeneralBloomFilterFaulty();
} finally {
-// Return the bloom block so that its ref count can be decremented.
-reader.returnBlock(bloomBlock);
+// Release the bloom block so that its ref count can be decremented.
+if (bloomBlock != null) {
+  bloomBlock.release();
+}
}
return true;
}

TestCacheOnWrite.java

@@ -336,15 +336,15 @@ public class TestCacheOnWrite {
// Call release twice because for the isCache case the counter would have got incremented
// twice. Notice that here we need to release different blocks. See the comments in
// BucketCache#returnBlock.
-blockCache.returnBlock(blockCacheKey, blockPair.getSecond());
+blockPair.getSecond().release();
if (cacheCompressedData) {
if (this.compress == Compression.Algorithm.NONE
|| cowType == CacheOnWriteType.INDEX_BLOCKS
|| cowType == CacheOnWriteType.BLOOM_BLOCKS) {
-blockCache.returnBlock(blockCacheKey, blockPair.getFirst());
+blockPair.getFirst().release();
}
} else {
-blockCache.returnBlock(blockCacheKey, blockPair.getFirst());
+blockPair.getFirst().release();
}
}
}

TestHFile.java

@@ -223,7 +223,7 @@ public class TestHFile {
Assert.assertTrue(hfb.isOnHeap());
}
} finally {
-combined.returnBlock(key, cachedBlock);
+cachedBlock.release();
}
block.release(); // return the ByteBuffer back to the allocator.
}

TestHFileBlockIndex.java

@@ -175,10 +175,6 @@ public class TestHFileBlockIndex {
this.realReader = realReader;
}
-@Override
-public void returnBlock(HFileBlock block) {
-}
@Override
public HFileBlock readBlock(long offset, long onDiskSize,
boolean cacheBlock, boolean pread, boolean isCompaction,

TestHeapMemoryManager.java

@@ -735,10 +735,6 @@ public class TestHeapMemoryManager {
return null;
}
-@Override
-public void returnBlock(BlockCacheKey cacheKey, Cacheable buf) {
-}
public void setTestBlockSize(long testBlockSize) {
this.testBlockSize = testBlockSize;
}