diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java
index c85675b7d84..e8e77dc2bec 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java
@@ -297,6 +297,12 @@ public class ByteBuffAllocator {
}
}
}
+ this.usedBufCount.set(0);
+ this.maxPoolSizeInfoLevelLogged = false;
+ this.poolAllocationBytes.reset();
+ this.heapAllocationBytes.reset();
+ this.lastPoolAllocationBytes = 0;
+ this.lastHeapAllocationBytes = 0;
}

/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java
new file mode 100644
index 00000000000..73c0db4cce4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The {@link ByteBuffAllocator} won't allocate pooled heap {@link ByteBuff} now; at the same time,
+ * any off-heap {@link ByteBuff} allocated from the allocator must be a pooled one. That is to say,
+ * an exclusive memory HFileBlock must be an on-heap block, while a shared memory HFileBlock must
+ * be an off-heap block.
+ * <p>
+ * The exclusive memory HFileBlock does nothing when its retain or release methods are called,
+ * because its memory is reclaimed by the JVM garbage collector: even when the reference count
+ * drops to zero, there is no de-allocation to perform.
+ * <p>
+ * @see org.apache.hadoop.hbase.io.hfile.SharedMemHFileBlock
+ */
+@InterfaceAudience.Private
+public class ExclusiveMemHFileBlock extends HFileBlock {
+
+ ExclusiveMemHFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
+ int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader,
+ long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader,
+ HFileContext fileContext) {
+ super(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, buf,
+ fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext,
+ ByteBuffAllocator.HEAP);
+ }
+
+ @Override
+ public int refCnt() {
+ return 0;
+ }
+
+ @Override
+ public ExclusiveMemHFileBlock retain() {
+ // do nothing
+ return this;
+ }
+
+ @Override
+ public boolean release() {
+ // do nothing
+ return false;
+ }
+
+ @Override
+ public boolean isSharedMem() {
+ return false;
+ }
+}
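The reference-counting contract described in the class comment above is worth spelling out for reviewers. A minimal illustrative sketch follows (the helper class and method below are hypothetical, not part of this patch); it relies only on the public HFileBlock methods touched by this change:

import org.apache.hadoop.hbase.io.hfile.HFileBlock;

// Hypothetical helper, for illustration only: the observable difference between a
// shared (ref-counted, off-heap) block and an exclusive (GC-managed, on-heap) block.
public final class BlockRefCountSketch {
  static void describe(HFileBlock block) {
    if (block.isSharedMem()) {
      // SharedMemHFileBlock: refCnt()/retain()/release() do real reference counting, and
      // releasing the last reference can hand the pooled buffer back to the allocator.
      System.out.println("shared block, refCnt = " + block.refCnt());
    } else {
      // ExclusiveMemHFileBlock: all three calls are deliberate no-ops.
      System.out.println(block.refCnt());          // always 0
      System.out.println(block.retain() == block); // true, retain() just returns this
      System.out.println(block.release());         // false, nothing is ever freed here
    }
  }
}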
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 452b68c235c..14ed275cc33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -285,7 +285,7 @@ public class HFileBlock implements Cacheable {
boolean usesChecksum = buf.get() == (byte) 1;
long offset = buf.getLong();
int nextBlockOnDiskSize = buf.getInt();
- return new HFileBlock(newByteBuff, usesChecksum, offset, nextBlockOnDiskSize, null, alloc);
+ return createFromBuff(newByteBuff, usesChecksum, offset, nextBlockOnDiskSize, null, alloc);
}
@Override
@@ -300,28 +300,6 @@ public class HFileBlock implements Cacheable {
CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
}
- /**
- * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
- */
- private HFileBlock(HFileBlock that) {
- this(that, false);
- }
-
- /**
- * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
- * param.
- */
- private HFileBlock(HFileBlock that, boolean bufCopy) {
- init(that.blockType, that.onDiskSizeWithoutHeader, that.uncompressedSizeWithoutHeader,
- that.prevBlockOffset, that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize,
- that.fileContext, that.allocator);
- if (bufCopy) {
- this.buf = ByteBuff.wrap(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
- } else {
- this.buf = that.buf.duplicate();
- }
- }
-
/**
* Creates a new {@link HFile} block from the given fields. This constructor
* is used only while writing blocks and caching,
@@ -336,7 +314,7 @@ public class HFileBlock implements Cacheable {
* @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader}
* @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
* @param prevBlockOffset see {@link #prevBlockOffset}
- * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
+ * @param buf block buffer with header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
* @param fillHeader when true, write the first 4 header fields into passed buffer.
* @param offset the file offset the block was read from
* @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
@@ -344,12 +322,19 @@ public class HFileBlock implements Cacheable {
*/
@VisibleForTesting
public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
- int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer b, boolean fillHeader,
- long offset, final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader,
- HFileContext fileContext, ByteBuffAllocator allocator) {
- init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, offset,
- onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext, allocator);
- this.buf = new SingleByteBuff(b);
+ int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader,
+ long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext,
+ ByteBuffAllocator allocator) {
+ this.blockType = blockType;
+ this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
+ this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
+ this.prevBlockOffset = prevBlockOffset;
+ this.offset = offset;
+ this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
+ this.nextBlockOnDiskSize = nextBlockOnDiskSize;
+ this.fileContext = fileContext;
+ this.allocator = allocator;
+ this.buf = buf;
if (fillHeader) {
overwriteHeader();
}
@@ -363,7 +348,7 @@ public class HFileBlock implements Cacheable {
* to that point.
* @param buf Has header, content, and trailing checksums if present.
*/
- HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, final long offset,
+ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final long offset,
final int nextBlockOnDiskSize, HFileContext fileContext, ByteBuffAllocator allocator)
throws IOException {
buf.rewind();
@@ -374,15 +359,15 @@ public class HFileBlock implements Cacheable {
final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
// This constructor is called when we deserialize a block from cache and when we read a block in
// from the fs. fileCache is null when deserialized from cache so need to make up one.
- HFileContextBuilder fileContextBuilder = fileContext != null?
- new HFileContextBuilder(fileContext): new HFileContextBuilder();
+ HFileContextBuilder fileContextBuilder =
+ fileContext != null ? new HFileContextBuilder(fileContext) : new HFileContextBuilder();
fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
int onDiskDataSizeWithHeader;
if (usesHBaseChecksum) {
byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
- // Use the checksum type and bytes per checksum from header, not from filecontext.
+ // Use the checksum type and bytes per checksum from header, not from fileContext.
fileContextBuilder.withChecksumType(ChecksumType.codeToType(checksumType));
fileContextBuilder.withBytesPerCheckSum(bytesPerChecksum);
} else {
@@ -393,29 +378,19 @@ public class HFileBlock implements Cacheable {
}
fileContext = fileContextBuilder.build();
assert usesHBaseChecksum == fileContext.isUseHBaseChecksum();
- init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, offset,
- onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext, allocator);
- this.offset = offset;
- this.buf = buf;
- this.buf.rewind();
- }
-
- /**
- * Called from constructors.
- */
- private void init(BlockType blockType, int onDiskSizeWithoutHeader,
- int uncompressedSizeWithoutHeader, long prevBlockOffset, long offset,
- int onDiskDataSizeWithHeader, final int nextBlockOnDiskSize, HFileContext fileContext,
- ByteBuffAllocator allocator) {
- this.blockType = blockType;
- this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
- this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
- this.prevBlockOffset = prevBlockOffset;
- this.offset = offset;
- this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
- this.nextBlockOnDiskSize = nextBlockOnDiskSize;
- this.fileContext = fileContext;
- this.allocator = allocator;
+ return new HFileBlockBuilder()
+ .withBlockType(blockType)
+ .withOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader)
+ .withUncompressedSizeWithoutHeader(uncompressedSizeWithoutHeader)
+ .withPrevBlockOffset(prevBlockOffset)
+ .withOffset(offset)
+ .withOnDiskDataSizeWithHeader(onDiskDataSizeWithHeader)
+ .withNextBlockOnDiskSize(nextBlockOnDiskSize)
+ .withHFileContext(fileContext)
+ .withByteBuffAllocator(allocator)
+ .withByteBuff(buf.rewind())
+ .withShared(!buf.hasArray())
+ .build();
}
/**
@@ -639,7 +614,7 @@ public class HFileBlock implements Cacheable {
.append("(").append(onDiskSizeWithoutHeader)
.append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")");
}
- String dataBegin = null;
+ String dataBegin;
if (buf.hasArray()) {
dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(),
Math.min(32, buf.limit() - buf.arrayOffset() - headerSize()));
@@ -673,7 +648,7 @@ public class HFileBlock implements Cacheable {
return this;
}
- HFileBlock unpacked = new HFileBlock(this);
+ HFileBlock unpacked = shallowClone(this);
unpacked.allocateBuffer(); // allocates space for the decompressed block
boolean succ = false;
try {
@@ -761,10 +736,16 @@ public class HFileBlock implements Cacheable {
}
/**
- * @return true to indicate the block is allocated from JVM heap, otherwise from off-heap.
+ * Will be overridden by {@link SharedMemHFileBlock} or {@link ExclusiveMemHFileBlock}. Returns
+ * true by default.
*/
- boolean isOnHeap() {
- return buf.hasArray();
+ public boolean isSharedMem() {
+ if (this instanceof SharedMemHFileBlock) {
+ return true;
+ } else if (this instanceof ExclusiveMemHFileBlock) {
+ return false;
+ }
+ return true;
}
/**
@@ -1039,8 +1020,7 @@ public class HFileBlock implements Cacheable {
+ offset);
}
startOffset = offset;
-
- finishBlockAndWriteHeaderAndData((DataOutputStream) out);
+ finishBlockAndWriteHeaderAndData(out);
}
/**
@@ -1251,13 +1231,27 @@ public class HFileBlock implements Cacheable {
.withIncludesMvcc(fileContext.isIncludesMvcc())
.withIncludesTags(fileContext.isIncludesTags())
.build();
- return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
- getUncompressedSizeWithoutHeader(), prevOffset,
- cacheConf.shouldCacheCompressed(blockType.getCategory()) ? cloneOnDiskBufferWithHeader()
- : cloneUncompressedBufferWithHeader(),
- FILL_HEADER, startOffset, UNSET,
- onDiskBlockBytesWithHeader.size() + onDiskChecksum.length, newContext,
- cacheConf.getByteBuffAllocator());
+ // Build the HFileBlock.
+ HFileBlockBuilder builder = new HFileBlockBuilder();
+ ByteBuffer buffer;
+ if (cacheConf.shouldCacheCompressed(blockType.getCategory())) {
+ buffer = cloneOnDiskBufferWithHeader();
+ } else {
+ buffer = cloneUncompressedBufferWithHeader();
+ }
+ return builder.withBlockType(blockType)
+ .withOnDiskSizeWithoutHeader(getOnDiskSizeWithoutHeader())
+ .withUncompressedSizeWithoutHeader(getUncompressedSizeWithoutHeader())
+ .withPrevBlockOffset(prevOffset)
+ .withByteBuff(ByteBuff.wrap(buffer))
+ .withFillHeader(FILL_HEADER)
+ .withOffset(startOffset)
+ .withNextBlockOnDiskSize(UNSET)
+ .withOnDiskDataSizeWithHeader(onDiskBlockBytesWithHeader.size() + onDiskChecksum.length)
+ .withHFileContext(newContext)
+ .withByteBuffAllocator(cacheConf.getByteBuffAllocator())
+ .withShared(!buffer.hasArray())
+ .build();
}
}
@@ -1781,8 +1775,8 @@ public class HFileBlock implements Cacheable {
// The onDiskBlock will become the headerAndDataBuffer for this block.
// If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
// contains the header of next block, so no need to set next block's header in it.
- HFileBlock hFileBlock = new HFileBlock(curBlock, checksumSupport, offset,
- nextBlockOnDiskSize, fileContext, intoHeap ? HEAP : allocator);
+ HFileBlock hFileBlock = createFromBuff(curBlock, checksumSupport, offset,
+ nextBlockOnDiskSize, fileContext, intoHeap ? HEAP : allocator);
// Run check on uncompressed sizings.
if (!fileContext.isCompressedOrEncrypted()) {
hFileBlock.sanityCheckUncompressed();
@@ -1947,7 +1941,7 @@ public class HFileBlock implements Cacheable {
if (comparison == null) {
return false;
}
- if (comparison.getClass() != this.getClass()) {
+ if (!(comparison instanceof HFileBlock)) {
return false;
}
@@ -2084,7 +2078,27 @@ public class HFileBlock implements Cacheable {
" onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
}
- public HFileBlock deepCloneOnHeap() {
- return new HFileBlock(this, true);
+ private static HFileBlockBuilder createBuilder(HFileBlock blk) {
+ return new HFileBlockBuilder()
+ .withBlockType(blk.blockType)
+ .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader)
+ .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader)
+ .withPrevBlockOffset(blk.prevBlockOffset)
+ .withByteBuff(blk.buf.duplicate()) // Duplicate the buffer.
+ .withOffset(blk.offset)
+ .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader)
+ .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize)
+ .withHFileContext(blk.fileContext)
+ .withByteBuffAllocator(blk.allocator)
+ .withShared(blk.isSharedMem());
+ }
+
+ static HFileBlock shallowClone(HFileBlock blk) {
+ return createBuilder(blk).build();
+ }
+
+ static HFileBlock deepCloneOnHeap(HFileBlock blk) {
+ ByteBuff deepCloned = ByteBuff.wrap(ByteBuffer.wrap(blk.buf.toBytes(0, blk.buf.limit())));
+ return createBuilder(blk).withByteBuff(deepCloned).withShared(false).build();
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java
new file mode 100644
index 00000000000..4ed50e1cdb7
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import static org.apache.hadoop.hbase.io.hfile.HFileBlock.UNSET;
+import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP;
+
+import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class HFileBlockBuilder {
+
+ private BlockType blockType;
+ private int onDiskSizeWithoutHeader;
+ private int onDiskDataSizeWithHeader;
+ private int uncompressedSizeWithoutHeader;
+ private long prevBlockOffset;
+ private ByteBuff buf;
+ private boolean fillHeader = false;
+ private long offset = UNSET;
+ private int nextBlockOnDiskSize = UNSET;
+ private HFileContext fileContext;
+ private ByteBuffAllocator allocator = HEAP;
+ private boolean isShared;
+
+ public HFileBlockBuilder withBlockType(BlockType blockType) {
+ this.blockType = blockType;
+ return this;
+ }
+
+ public HFileBlockBuilder withOnDiskSizeWithoutHeader(int onDiskSizeWithoutHeader) {
+ this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
+ return this;
+ }
+
+ public HFileBlockBuilder withOnDiskDataSizeWithHeader(int onDiskDataSizeWithHeader) {
+ this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
+ return this;
+ }
+
+ public HFileBlockBuilder withUncompressedSizeWithoutHeader(int uncompressedSizeWithoutHeader) {
+ this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
+ return this;
+ }
+
+ public HFileBlockBuilder withPrevBlockOffset(long prevBlockOffset) {
+ this.prevBlockOffset = prevBlockOffset;
+ return this;
+ }
+
+ public HFileBlockBuilder withByteBuff(ByteBuff buf) {
+ this.buf = buf;
+ return this;
+ }
+
+ public HFileBlockBuilder withFillHeader(boolean fillHeader) {
+ this.fillHeader = fillHeader;
+ return this;
+ }
+
+ public HFileBlockBuilder withOffset(long offset) {
+ this.offset = offset;
+ return this;
+ }
+
+ public HFileBlockBuilder withNextBlockOnDiskSize(int nextBlockOnDiskSize) {
+ this.nextBlockOnDiskSize = nextBlockOnDiskSize;
+ return this;
+ }
+
+ public HFileBlockBuilder withHFileContext(HFileContext fileContext) {
+ this.fileContext = fileContext;
+ return this;
+ }
+
+ public HFileBlockBuilder withByteBuffAllocator(ByteBuffAllocator allocator) {
+ this.allocator = allocator;
+ return this;
+ }
+
+ public HFileBlockBuilder withShared(boolean isShared) {
+ this.isShared = isShared;
+ return this;
+ }
+
+ public HFileBlock build() {
+ if (isShared) {
+ return new SharedMemHFileBlock(blockType, onDiskSizeWithoutHeader,
+ uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset,
+ nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, allocator);
+ } else {
+ return new ExclusiveMemHFileBlock(blockType, onDiskSizeWithoutHeader,
+ uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset,
+ nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext);
+ }
+ }
+}
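For review context, the build() dispatch above means callers never construct SharedMemHFileBlock or ExclusiveMemHFileBlock directly; they describe the block and state whether its buffer is shared (pooled, off-heap) memory, typically via withShared(!buf.hasArray()) as in createFromBuff and Writer.getBlockForCaching. A minimal usage sketch, with illustrative sizes and a default HFileContext (not taken from this patch):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileBlockBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.nio.ByteBuff;

public class HFileBlockBuilderSketch {
  public static void main(String[] args) {
    // On-heap buffer: hasArray() is true, so withShared(false) below makes build()
    // return an ExclusiveMemHFileBlock; an off-heap pooled buffer would flip this
    // to SharedMemHFileBlock.
    ByteBuff heapBuf = ByteBuff.wrap(ByteBuffer.allocate(64 * 1024));
    HFileBlock block = new HFileBlockBuilder()
      .withBlockType(BlockType.DATA)
      .withOnDiskSizeWithoutHeader(0)
      .withUncompressedSizeWithoutHeader(0)
      .withPrevBlockOffset(-1)
      .withByteBuff(heapBuf)
      .withFillHeader(HFileBlock.DONT_FILL_HEADER)
      .withOffset(0)
      .withNextBlockOnDiskSize(-1)
      .withOnDiskDataSizeWithHeader(0)
      .withHFileContext(new HFileContextBuilder().build())
      .withByteBuffAllocator(ByteBuffAllocator.HEAP)
      .withShared(!heapBuf.hasArray())
      .build();
    System.out.println(block.isSharedMem()); // false: exclusive (heap) block
  }
}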
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 11576157eaa..0dae13cde97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -523,15 +523,15 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
if (block != null && curBlock != null && block.getOffset() == curBlock.getOffset()) {
return;
}
- if (this.curBlock != null) {
+ if (this.curBlock != null && this.curBlock.isSharedMem()) {
prevBlocks.add(this.curBlock);
}
this.curBlock = block;
}
void reset() {
- // We don't have to keep ref to EXCLUSIVE type of block
- if (this.curBlock != null) {
+ // We don't have to keep ref to heap block
+ if (this.curBlock != null && this.curBlock.isSharedMem()) {
this.prevBlocks.add(this.curBlock);
}
this.curBlock = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 0ec73a30704..7740460afcf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -372,8 +372,8 @@ public class LruBlockCache implements FirstLevelBlockCache {
private Cacheable asReferencedHeapBlock(Cacheable buf) {
if (buf instanceof HFileBlock) {
HFileBlock blk = ((HFileBlock) buf);
- if (!blk.isOnHeap()) {
- return blk.deepCloneOnHeap();
+ if (blk.isSharedMem()) {
+ return HFileBlock.deepCloneOnHeap(blk);
}
}
// The block will be referenced by this LRUBlockCache, so should increase its refCnt here.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java
new file mode 100644
index 00000000000..0d2217e1579
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The {@link ByteBuffAllocator} won't allocate pooled heap {@link ByteBuff} now; at the same time,
+ * any off-heap {@link ByteBuff} allocated from the allocator must be a pooled one. That is to say,
+ * an exclusive memory HFileBlock must be an on-heap block, while a shared memory HFileBlock must
+ * be an off-heap block.
+ * @see org.apache.hadoop.hbase.io.hfile.ExclusiveMemHFileBlock
+ **/
+@InterfaceAudience.Private
+public class SharedMemHFileBlock extends HFileBlock {
+
+ SharedMemHFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
+ int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader,
+ long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext,
+ ByteBuffAllocator alloc) {
+ super(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, buf,
+ fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc);
+ }
+
+ @Override
+ public boolean isSharedMem() {
+ return true;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
index 5e69f6c9234..a90c5a33db6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
@@ -171,8 +171,8 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache {
if (victimCache != null) {
value = victimCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
if ((value != null) && caching) {
- if ((value instanceof HFileBlock) && !((HFileBlock) value).isOnHeap()) {
- value = ((HFileBlock) value).deepCloneOnHeap();
+ if ((value instanceof HFileBlock) && ((HFileBlock) value).isSharedMem()) {
+ value = HFileBlock.deepCloneOnHeap((HFileBlock) value);
}
cacheBlock(cacheKey, value);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index 71ffb87c266..108de7037d3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -43,10 +43,12 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.io.hfile.ExclusiveMemHFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.io.hfile.LruCachedBlock;
+import org.apache.hadoop.hbase.io.hfile.SharedMemHFileBlock;
import org.apache.hadoop.hbase.regionserver.CSLMImmutableSegment;
import org.apache.hadoop.hbase.regionserver.CellArrayImmutableSegment;
import org.apache.hadoop.hbase.regionserver.CellArrayMap;
@@ -529,6 +531,14 @@ public class TestHeapSize {
actual = HFileBlock.FIXED_OVERHEAD;
expected = ClassSize.estimateBase(HFileBlock.class, false);
assertEquals(expected, actual);
+
+ actual = ExclusiveMemHFileBlock.FIXED_OVERHEAD;
+ expected = ClassSize.estimateBase(ExclusiveMemHFileBlock.class, false);
+ assertEquals(expected, actual);
+
+ actual = SharedMemHFileBlock.FIXED_OVERHEAD;
+ expected = ClassSize.estimateBase(SharedMemHFileBlock.class, false);
+ assertEquals(expected, actual);
}
@Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index 717e9d758da..a7bb8e6e648 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -148,7 +148,6 @@ public class CacheTestUtils {
if (buf != null) {
assertEquals(block.block, buf);
}
-
}
// Re-add some duplicate blocks. Hope nothing breaks.
@@ -307,10 +306,11 @@ public class CacheTestUtils {
.withBytesPerCheckSum(0)
.withChecksumType(ChecksumType.NULL)
.build();
- HFileBlock generated = new HFileBlock(BlockType.DATA, onDiskSizeWithoutHeader,
- uncompressedSizeWithoutHeader, prevBlockOffset, cachedBuffer, HFileBlock.DONT_FILL_HEADER,
- blockSize, onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, -1, meta,
- ByteBuffAllocator.HEAP);
+ HFileBlock generated =
+ new HFileBlock(BlockType.DATA, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
+ prevBlockOffset, ByteBuff.wrap(cachedBuffer), HFileBlock.DONT_FILL_HEADER, blockSize,
+ onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, -1, meta,
+ ByteBuffAllocator.HEAP);
String strKey;
/* No conflicting keys */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
index 2aebc8ceabe..6d02854000e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
@@ -101,7 +101,7 @@ public class TestChecksum {
HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, totalSize, (HFileSystem) fs, path,
meta, ByteBuffAllocator.HEAP);
HFileBlock b = hbr.readBlockData(0, -1, false, false, true);
- assertTrue(b.isOnHeap());
+ assertTrue(!b.isSharedMem());
assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
}
@@ -148,7 +148,7 @@ public class TestChecksum {
HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, totalSize, (HFileSystem) fs, path,
meta, ByteBuffAllocator.HEAP);
HFileBlock b = hbr.readBlockData(0, -1, false, false, true);
- assertTrue(b.isOnHeap());
+ assertTrue(!b.isSharedMem());
// verify SingleByteBuff checksum.
verifySBBCheckSum(b.getBufferReadOnly());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 84e24e6d20f..a504442d06e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -169,7 +169,7 @@ public class TestHFile {
Cacheable cachedBlock = lru.getBlock(key, false, false, true);
Assert.assertNotNull(cachedBlock);
Assert.assertTrue(cachedBlock instanceof HFileBlock);
- Assert.assertTrue(((HFileBlock) cachedBlock).isOnHeap());
+ Assert.assertFalse(((HFileBlock) cachedBlock).isSharedMem());
// Should never allocate off-heap block from allocator because ensure that it's LRU.
Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
block.release(); // return back the ByteBuffer back to allocator.
@@ -217,10 +217,10 @@ public class TestHFile {
HFileBlock hfb = (HFileBlock) cachedBlock;
// Data block will be cached in BucketCache, so it should be an off-heap block.
if (hfb.getBlockType().isData()) {
- Assert.assertFalse(hfb.isOnHeap());
+ Assert.assertTrue(hfb.isSharedMem());
} else {
// Non-data block will be cached in LRUBlockCache, so it must be an on-heap block.
- Assert.assertTrue(hfb.isOnHeap());
+ Assert.assertFalse(hfb.isSharedMem());
}
} finally {
cachedBlock.release();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index de10cede222..d0e98fd71a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -340,6 +340,14 @@ public class TestHFileBlock {
testReaderV2Internals();
}
+ private void assertRelease(HFileBlock blk) {
+ if (blk instanceof ExclusiveMemHFileBlock) {
+ assertFalse(blk.release());
+ } else {
+ assertTrue(blk.release());
+ }
+ }
+
protected void testReaderV2Internals() throws IOException {
if(includesTag) {
TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
@@ -403,10 +411,10 @@ public class TestHFileBlock {
+ "'.\nMessage is expected to start with: '" + expectedPrefix
+ "'", ex.getMessage().startsWith(expectedPrefix));
}
- assertTrue(b.release());
+ assertRelease(b);
is.close();
}
- assertTrue(expected.release());
+ assertRelease(expected);
}
}
}
@@ -534,7 +542,7 @@ public class TestHFileBlock {
deserialized.unpack(meta, hbr));
}
}
- assertTrue(blockUnpacked.release());
+ assertRelease(blockUnpacked);
if (blockFromHFile != blockUnpacked) {
blockFromHFile.release();
}
@@ -651,7 +659,7 @@ public class TestHFileBlock {
assertEquals(b.getOnDiskDataSizeWithHeader(),
b2.getOnDiskDataSizeWithHeader());
assertEquals(0, HFile.getAndResetChecksumFailuresCount());
- assertTrue(b2.release());
+ assertRelease(b2);
curOffset += b.getOnDiskSizeWithHeader();
@@ -694,12 +702,12 @@ public class TestHFileBlock {
}
}
assertTrue(wrongBytesMsg, bytesAreCorrect);
- assertTrue(newBlock.release());
+ assertRelease(newBlock);
if (newBlock != b) {
- assertTrue(b.release());
+ assertRelease(b);
}
} else {
- assertTrue(b.release());
+ assertRelease(b);
}
}
assertEquals(curOffset, fs.getFileStatus(path).getLen());
@@ -750,9 +758,9 @@ public class TestHFileBlock {
long onDiskSizeArg = withOnDiskSize ? expectedSize : -1;
b = hbr.readBlockData(offset, onDiskSizeArg, pread, false, false);
if (useHeapAllocator) {
- assertTrue(b.isOnHeap());
+ assertTrue(!b.isSharedMem());
} else {
- assertTrue(!b.getBlockType().isData() || !b.isOnHeap());
+ assertTrue(!b.getBlockType().isData() || b.isSharedMem());
}
assertEquals(types.get(blockId), b.getBlockType());
assertEquals(expectedSize, b.getOnDiskSizeWithHeader());
@@ -913,14 +921,13 @@ public class TestHFileBlock {
.withCompression(Algorithm.NONE)
.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
.withChecksumType(ChecksumType.NULL).build();
- HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf, HFileBlock.FILL_HEADER,
- -1, 0, -1, meta, HEAP);
- long byteBufferExpectedSize = ClassSize.align(ClassSize.estimateBase(
- new MultiByteBuff(buf).getClass(), true)
- + HConstants.HFILEBLOCK_HEADER_SIZE + size);
- long hfileMetaSize = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
- long hfileBlockExpectedSize =
- ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
+ HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
+ HFileBlock.FILL_HEADER, -1, 0, -1, meta, HEAP);
+ long byteBufferExpectedSize =
+ ClassSize.align(ClassSize.estimateBase(new MultiByteBuff(buf).getClass(), true)
+ + HConstants.HFILEBLOCK_HEADER_SIZE + size);
+ long hfileMetaSize = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
+ long hfileBlockExpectedSize = ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
long expected = hfileBlockExpectedSize + byteBufferExpectedSize + hfileMetaSize;
assertEquals("Block data size: " + size + ", byte buffer expected " +
"size: " + byteBufferExpectedSize + ", HFileBlock class expected " +
@@ -936,10 +943,10 @@ public class TestHFileBlock {
byte[] byteArr = new byte[length];
ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
HFileContext meta = new HFileContextBuilder().build();
- HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
- HFileBlock.FILL_HEADER, -1, 52, -1, meta, alloc);
- HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, buf,
- HFileBlock.FILL_HEADER, -1, -1, -1, meta, alloc);
+ HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1,
+ ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, -1, meta, alloc);
+ HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1,
+ ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, alloc);
ByteBuffer buff1 = ByteBuffer.allocate(length);
ByteBuffer buff2 = ByteBuffer.allocate(length);
blockWithNextBlockMetadata.serialize(buff1, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
index 5a6042c0ae8..2f249c8b1dc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.ChecksumType;
@@ -132,8 +133,8 @@ public class TestHFileDataBlockEncoder {
.withBlockSize(0)
.withChecksumType(ChecksumType.NULL)
.build();
- HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf, HFileBlock.FILL_HEADER,
- 0, 0, -1, hfileContext, ByteBuffAllocator.HEAP);
+ HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
+ HFileBlock.FILL_HEADER, 0, 0, -1, hfileContext, ByteBuffAllocator.HEAP);
HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTags);
assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}
@@ -198,9 +199,8 @@ public class TestHFileDataBlockEncoder {
.withBlockSize(0)
.withChecksumType(ChecksumType.NULL)
.build();
- HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
- HFileBlock.FILL_HEADER, 0,
- 0, -1, meta, ByteBuffAllocator.HEAP);
+ HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
+ HFileBlock.FILL_HEADER, 0, 0, -1, meta, ByteBuffAllocator.HEAP);
return b;
}
@@ -221,9 +221,9 @@ public class TestHFileDataBlockEncoder {
blockEncoder.endBlockEncoding(context, dos, baos.getBuffer(), BlockType.DATA);
byte[] encodedBytes = baos.toByteArray();
size = encodedBytes.length - block.getDummyHeaderForVersion().length;
- return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
- HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), -1,
- block.getHFileContext(), ByteBuffAllocator.HEAP);
+ return new HFileBlock(context.getBlockType(), size, size, -1,
+ ByteBuff.wrap(ByteBuffer.wrap(encodedBytes)), HFileBlock.FILL_HEADER, 0,
+ block.getOnDiskDataSizeWithHeader(), -1, block.getHFileContext(), ByteBuffAllocator.HEAP);
}
private void writeBlock(List