HBASE-22612 Address the final overview reviewing comments of HBASE-21879 (#331)

openinx 2019-06-24 18:04:40 +08:00 committed by GitHub
parent c1e5350be7
commit 531d2902cb
5 changed files with 25 additions and 10 deletions

ByteBufferListOutputStream.java

@@ -41,7 +41,7 @@ import org.slf4j.LoggerFactory;
 public class ByteBufferListOutputStream extends ByteBufferOutputStream {
   private static final Logger LOG = LoggerFactory.getLogger(ByteBufferListOutputStream.class);
 
-  private ByteBuffAllocator allocator;
+  private final ByteBuffAllocator allocator;
   // Keep track of the BBs where bytes written to. We will first try to get a BB from the pool. If
   // it is not available will make a new one our own and keep writing to that. We keep track of all
   // the BBs that we got from pool, separately so that on closeAndPutbackBuffers, we can make sure
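The tracking comment above describes a pool-first allocation strategy: take a buffer from the pool when one is available, otherwise allocate a fresh one, and remember the pooled ones separately so that only they are handed back on close. A minimal sketch of that idea, using plain ByteBuffers and a hypothetical pool in place of the real ByteBuffAllocator/ByteBuff API (not the actual HBase implementation):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// Illustration only: pool-first buffer acquisition with separate tracking of pooled buffers,
// so that close() can return exactly those buffers to the pool (the spirit of
// closeAndPutbackBuffers, not its real implementation).
final class PooledBufferList {
  private final Queue<ByteBuffer> pool;                  // stands in for ByteBuffAllocator
  private final List<ByteBuffer> allBufs = new ArrayList<>();
  private final List<ByteBuffer> bufsFromPool = new ArrayList<>();

  PooledBufferList(Queue<ByteBuffer> pool) {
    this.pool = pool;
  }

  ByteBuffer nextBuffer(int fallbackSize) {
    ByteBuffer bb = pool.poll();                         // first try to get a BB from the pool
    if (bb != null) {
      bufsFromPool.add(bb);                              // track pooled BBs separately
    } else {
      bb = ByteBuffer.allocate(fallbackSize);            // otherwise make a new one of our own
    }
    allBufs.add(bb);
    return bb;
  }

  void close() {
    for (ByteBuffer bb : bufsFromPool) {
      bb.clear();
      pool.offer(bb);                                    // only pooled buffers go back
    }
    allBufs.clear();
    bufsFromPool.clear();
  }

  public static void main(String[] args) {
    Queue<ByteBuffer> pool = new ConcurrentLinkedQueue<>();
    pool.offer(ByteBuffer.allocate(64));
    PooledBufferList out = new PooledBufferList(pool);
    out.nextBuffer(64);    // comes from the pool
    out.nextBuffer(64);    // pool empty, allocated on the fly
    out.close();           // one buffer returned to the pool
  }
}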

HFileContext.java

@@ -193,8 +193,8 @@ public class HFileContext implements HeapSize, Cloneable {
   }
 
   /**
-   * HeapSize implementation. NOTE : The heapsize should be altered as and when new state variable
-   * are added
+   * HeapSize implementation. NOTE : The heap size should be altered when new state variable are
+   * added.
    * @return heap size of the HFileContext
    */
   @Override
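The note being reworded here is about the HeapSize contract: heapSize() returns a hand-maintained estimate, so every field added to the class must also be added to the sum. A rough, hypothetical illustration of that pattern (the constants and fields are made up, not HFileContext's actual accounting):

// Hypothetical sketch of the HeapSize pattern: the estimate is a manual sum over the
// object's state, so a newly added field must also be added to heapSize() below.
final class ExampleContext {
  private static final long OBJECT_HEADER = 16L;  // assumed JVM overhead, illustration only
  private static final long REFERENCE = 8L;       // assumed reference size, illustration only

  private boolean usesChecksum;                   // 1 byte
  private int blockSize;                          // 4 bytes
  private byte[] tableName;                       // reference + payload

  public long heapSize() {
    long size = OBJECT_HEADER;
    size += 1 + 4;                                                    // the primitive fields
    size += REFERENCE + (tableName == null ? 0 : tableName.length);   // the array field
    return size;
  }
}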

RefCnt.java

@@ -26,7 +26,7 @@ import org.apache.hbase.thirdparty.io.netty.util.ReferenceCounted;
 
 /**
  * Maintain an reference count integer inside to track life cycle of {@link ByteBuff}, if the
- * reference count become 0, it'll call {@link Recycler#free()} once.
+ * reference count become 0, it'll call {@link Recycler#free()} exactly once.
  */
 @InterfaceAudience.Private
 public class RefCnt extends AbstractReferenceCounted {
@@ -36,8 +36,8 @@ public class RefCnt extends AbstractReferenceCounted {
   /**
    * Create an {@link RefCnt} with an initial reference count = 1. If the reference count become
    * zero, the recycler will do nothing. Usually, an Heap {@link ByteBuff} will use this kind of
-   * refCnt to track its life cycle, it help to abstract the code path although it's meaningless to
-   * use an refCnt for heap ByteBuff.
+   * refCnt to track its life cycle, it help to abstract the code path although it's not really
+   * needed to track on heap ByteBuff.
    */
   public static RefCnt create() {
     return new RefCnt(ByteBuffAllocator.NONE);
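Both RefCnt hunks document the same contract: the count starts at 1, retain/release move it, and the recycler's free() runs exactly once, on the release that drops the count to zero; for a heap ByteBuff the recycler is a no-op (NONE). A self-contained sketch of that contract with a plain AtomicInteger, standing in for the Netty AbstractReferenceCounted machinery that RefCnt actually extends:

import java.util.concurrent.atomic.AtomicInteger;

// Illustration of the documented semantics, not the real RefCnt: the count starts at 1,
// and recycler.free() is invoked exactly once, when a release() reaches zero.
final class SimpleRefCnt {
  interface Recycler {
    void free();
  }
  static final Recycler NONE = () -> { };       // heap case: nothing to hand back

  private final AtomicInteger refCnt = new AtomicInteger(1);
  private final Recycler recycler;

  SimpleRefCnt(Recycler recycler) {
    this.recycler = recycler;
  }

  SimpleRefCnt retain() {
    int current;
    do {
      current = refCnt.get();
      if (current <= 0) {
        throw new IllegalStateException("already freed");
      }
    } while (!refCnt.compareAndSet(current, current + 1));
    return this;
  }

  /** Returns true on the call that released the last reference and freed the resource. */
  boolean release() {
    int remaining = refCnt.decrementAndGet();
    if (remaining == 0) {
      recycler.free();                          // reached zero: freed exactly once
      return true;
    }
    if (remaining < 0) {
      throw new IllegalStateException("released after already freed");
    }
    return false;
  }
}

A heap ByteBuff would be created with NONE (mirroring RefCnt.create()), while a pooled, off-heap one would pass a recycler that returns its memory to the ByteBuffAllocator.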

LruBlockCache.java

@@ -153,8 +153,14 @@ public class LruBlockCache implements FirstLevelBlockCache {
   private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size";
   private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L;
 
-  /** Concurrent map (the cache) */
-  private transient final Map<BlockCacheKey, LruCachedBlock> map;
+  /**
+   * Defined the cache map as {@link ConcurrentHashMap} here, because in
+   * {@link LruBlockCache#getBlock}, we need to guarantee the atomicity of map#computeIfPresent
+   * (key, func). Besides, the func method must execute exactly once only when the key is present
+   * and under the lock context, otherwise the reference count will be messed up. Notice that the
+   * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that.
+   */
+  private transient final ConcurrentHashMap<BlockCacheKey, LruCachedBlock> map;
 
   /** Eviction lock (locked when eviction in process) */
   private transient final ReentrantLock evictionLock = new ReentrantLock(true);
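The new javadoc carries the main reasoning of this change: getBlock has to bump the block's reference count only while the entry is still in the map, and ConcurrentHashMap#computeIfPresent gives exactly that, since the remapping function runs atomically and at most once under the bin's lock, whereas ConcurrentSkipListMap documents that its function is not guaranteed to be applied once atomically. A sketch of that lookup pattern, with stand-in types instead of BlockCacheKey/LruCachedBlock:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Stand-in type for illustration; the real cache maps BlockCacheKey to LruCachedBlock.
final class CachedBlock {
  final AtomicInteger refCnt = new AtomicInteger(1);
  void retain() { refCnt.incrementAndGet(); }
}

final class RefCountingCache {
  private final ConcurrentHashMap<String, CachedBlock> map = new ConcurrentHashMap<>();

  void put(String key, CachedBlock block) {
    map.put(key, block);
  }

  /**
   * Returns the block with its reference count already bumped, or null if absent.
   * computeIfPresent runs the function atomically and at most once while the key is
   * mapped, so a concurrent eviction cannot slip in between the lookup and retain().
   */
  CachedBlock getBlock(String key) {
    return map.computeIfPresent(key, (k, block) -> {
      block.retain();      // executed under the map's per-bin lock, at most once
      return block;        // keep the mapping unchanged
    });
  }
}

If the map were a ConcurrentSkipListMap, a re-applied remapping function could retain the same block twice, which is exactly the reference-count corruption the comment warns about.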

BucketCache.java

@@ -140,7 +140,7 @@ public class BucketCache implements BlockCache, HeapSize {
   transient final RAMCache ramCache;
   // In this map, store the block's meta data like offset, length
   @VisibleForTesting
-  transient ConcurrentMap<BlockCacheKey, BucketEntry> backingMap;
+  transient ConcurrentHashMap<BlockCacheKey, BucketEntry> backingMap;
 
   /**
    * Flag if the cache is enabled or not... We shut it off if there are IO
@@ -1526,7 +1526,16 @@ public class BucketCache implements BlockCache, HeapSize {
    * Wrapped the delegate ConcurrentMap with maintaining its block's reference count.
    */
   static class RAMCache {
-    final ConcurrentMap<BlockCacheKey, RAMQueueEntry> delegate = new ConcurrentHashMap<>();
+    /**
+     * Defined the map as {@link ConcurrentHashMap} explicitly here, because in
+     * {@link RAMCache#get(BlockCacheKey)} and
+     * {@link RAMCache#putIfAbsent(BlockCacheKey, RAMQueueEntry)} , we need to guarantee the
+     * atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). Besides, the
+     * func method can execute exactly once only when the key is present(or absent) and under the
+     * lock context. Otherwise, the reference count of block will be messed up. Notice that the
+     * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that.
+     */
+    final ConcurrentHashMap<BlockCacheKey, RAMQueueEntry> delegate = new ConcurrentHashMap<>();
 
     public boolean containsKey(BlockCacheKey key) {
       return delegate.containsKey(key);
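The RAMCache javadoc makes the same argument for both of its hot paths: the read path may retain an entry only while it is actually mapped, and the insert path retains the newly installed entry, each at most once and under ConcurrentHashMap's per-key lock. A sketch of the shape such a wrapper can take, with a placeholder Entry type rather than the actual RAMQueueEntry logic:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

// Placeholder value type; the real delegate maps BlockCacheKey to RAMQueueEntry.
final class Entry {
  final AtomicInteger refCnt = new AtomicInteger(1);
  void retain() { refCnt.incrementAndGet(); }
}

final class RamCacheSketch {
  private final ConcurrentHashMap<String, Entry> delegate = new ConcurrentHashMap<>();

  /** Retain inside computeIfPresent: runs at most once, and only while the key is mapped. */
  Entry get(String key) {
    return delegate.computeIfPresent(key, (k, entry) -> {
      entry.retain();
      return entry;
    });
  }

  /**
   * Insert only if absent. The retain of the newly cached entry happens inside
   * computeIfAbsent, so it executes at most once and cannot race with a removal.
   * Returns null if this call inserted the value, otherwise the existing mapping
   * (the Map#putIfAbsent return convention).
   */
  Entry putIfAbsent(String key, Entry value) {
    AtomicBoolean inserted = new AtomicBoolean(false);
    Entry mapped = delegate.computeIfAbsent(key, k -> {
      value.retain();            // the cache itself now holds a reference
      inserted.set(true);
      return value;
    });
    return inserted.get() ? null : mapped;
  }
}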