HubSpot Backport: HBASE-27170 ByteBuffAllocator leak when decompressing blocks near minSizeForReservoirUse

commit 588ad6b6eb
parent 6867507ba6
Author: Bryan Beaudreault
Date:   2022-07-01 12:33:56 -04:00

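Background on the leak: ByteBuffAllocator only serves pooled (off-heap) buffers for requests of at least minSizeForReservoirUse; smaller requests get plain on-heap buffers. For a block whose compressed size falls just below that threshold while its decompressed size lands above it, the old unpack() path cloned the block first, stamping the clone with the compressed buffer's isSharedMem() flag, and only then swapped in the freshly allocated pooled buffer. A rough sketch of the failure mode, with illustrative sizes (the 8 KB threshold is an assumption, not part of the patch):

    // Old, leaky path in unpack(), assuming minSizeForReservoirUse = 8 KB:
    //   on-disk (compressed) size  ~6 KB -> allocator gave an on-heap buffer, isSharedMem() == false
    //   uncompressed size         ~10 KB -> allocator returns a pooled off-heap buffer
    HFileBlock unpacked = shallowClone(this); // clone built withShared(blk.isSharedMem()), i.e. false
    unpacked.allocateBuffer();                // buf silently replaced by a pooled ByteBuff
    // Callers trust isSharedMem(), skip release(), and the pooled buffer is
    // never handed back to the ByteBuffAllocator: that is the leak.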

@@ -636,8 +636,8 @@ public class HFileBlock implements Cacheable {
       return this;
     }
 
-    HFileBlock unpacked = shallowClone(this);
-    unpacked.allocateBuffer(); // allocates space for the decompressed block
+    ByteBuff newBuf = allocateBuffer(); // allocates space for the decompressed block
+    HFileBlock unpacked = shallowClone(this, newBuf);
     boolean succ = false;
     try {
       HFileBlockDecodingContext ctx = blockType == BlockType.ENCODED_DATA
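The fix flips the order: the decompressed buffer is allocated first and passed into the clone, so the clone's shared flag can be derived from the buffer it will actually hold (see the createBuilder hunk below). A sketch of the fixed flow; the failure-path release is an assumption about the surrounding try/finally, whose opening lines are visible above:

    ByteBuff newBuf = allocateBuffer();               // may be pooled or on-heap
    HFileBlock unpacked = shallowClone(this, newBuf); // shared flag now matches newBuf
    boolean succ = false;
    try {
      // ... decode the compressed bytes into newBuf ...
      succ = true;
      return unpacked;
    } finally {
      if (!succ) {
        unpacked.release(); // presumably hands newBuf back to the allocator on failure
      }
    }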
@@ -663,20 +663,21 @@ public class HFileBlock implements Cacheable {
    * from the existing buffer. Does not change header fields.
    * Reserve room to keep checksum bytes too.
    */
-  private void allocateBuffer() {
+  private ByteBuff allocateBuffer() {
     int cksumBytes = totalChecksumBytes();
     int headerSize = headerSize();
     int capacityNeeded = headerSize + uncompressedSizeWithoutHeader + cksumBytes;
 
+    ByteBuff source = buf.duplicate();
     ByteBuff newBuf = allocator.allocate(capacityNeeded);
 
     // Copy header bytes into newBuf.
-    buf.position(0);
-    newBuf.put(0, buf, 0, headerSize);
+    source.position(0);
+    newBuf.put(0, source, 0, headerSize);
 
-    buf = newBuf;
     // set limit to exclude next block's header
-    buf.limit(capacityNeeded);
+    newBuf.limit(capacityNeeded);
+    return newBuf;
   }
 
   /**
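Note that allocateBuffer() no longer mutates this.buf. It repositions a duplicate instead: ByteBuff.duplicate() shares the underlying memory while keeping independent position and limit, so readers of the original buffer are undisturbed and the source block remains intact (and releasable) if decompression later fails. The core of the hunk, annotated:

    ByteBuff source = buf.duplicate();    // same bytes, independent position/limit
    source.position(0);                   // safe: buf's own position is untouched
    newBuf.put(0, source, 0, headerSize); // copy only the header into the new buffer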
@@ -2060,27 +2061,23 @@ public class HFileBlock implements Cacheable {
       " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
   }
 
-  private static HFileBlockBuilder createBuilder(HFileBlock blk){
-    return new HFileBlockBuilder()
-      .withBlockType(blk.blockType)
+  private static HFileBlockBuilder createBuilder(HFileBlock blk, ByteBuff newBuff) {
+    return new HFileBlockBuilder().withBlockType(blk.blockType)
       .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader)
       .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader)
-      .withPrevBlockOffset(blk.prevBlockOffset)
-      .withByteBuff(blk.buf.duplicate()) // Duplicate the buffer.
-      .withOffset(blk.offset)
+      .withPrevBlockOffset(blk.prevBlockOffset).withByteBuff(newBuff).withOffset(blk.offset)
       .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader)
-      .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize)
-      .withHFileContext(blk.fileContext)
-      .withByteBuffAllocator(blk.allocator)
-      .withShared(blk.isSharedMem());
+      .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize).withHFileContext(blk.fileContext)
+      .withByteBuffAllocator(blk.allocator).withShared(!newBuff.hasArray());
   }
 
-  static HFileBlock shallowClone(HFileBlock blk) {
-    return createBuilder(blk).build();
+  static HFileBlock shallowClone(HFileBlock blk, ByteBuff newBuf) {
+    return createBuilder(blk, newBuf).build();
   }
 
   static HFileBlock deepCloneOnHeap(HFileBlock blk) {
     ByteBuff deepCloned = ByteBuff.wrap(ByteBuffer.wrap(blk.buf.toBytes(0, blk.buf.limit())));
-    return createBuilder(blk).withByteBuff(deepCloned).withShared(false).build();
+    return createBuilder(blk, blk.buf.duplicate()).withByteBuff(deepCloned).withShared(false)
+      .build();
   }
 }
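The key change in this hunk is withShared(!newBuff.hasArray()): whether the clone is treated as shared memory now follows the buffer actually installed in it, rather than the source block's stale flag. Heap buffers expose a backing array; pooled buffers are direct and do not. A small illustration of that predicate, using ByteBuff.wrap() and hasArray() as they appear in the patch (sizes are arbitrary):

    ByteBuff heapBuf = ByteBuff.wrap(ByteBuffer.allocate(1024));          // hasArray() == true  -> not shared
    ByteBuff directBuf = ByteBuff.wrap(ByteBuffer.allocateDirect(1024));  // hasArray() == false -> shared, must be release()d

deepCloneOnHeap() passes blk.buf.duplicate() only to satisfy the new parameter; it then overrides the buffer with the on-heap copy and forces withShared(false), so its behavior is unchanged.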