HBASE-17201 Edit of HFileBlock comments and javadoc
commit b71509151e
parent 619d6a50f6
@@ -57,12 +57,12 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
- * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
- * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
- * hbase-1.3.0.
+ * Cacheable Blocks of an {@link HFile} version 2 file.
+ * Version 2 was introduced in hbase-0.92.0.
  *
  * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
- * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
+ * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support
+ * for Version 1 was removed in hbase-1.3.0.
  *
  * <h3>HFileBlock: Version 2</h3>
  * In version 2, a block is structured as follows:
@@ -112,6 +112,28 @@ import com.google.common.base.Preconditions;
 public class HFileBlock implements Cacheable {
   private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 
+  // Block Header fields.
+
+  // TODO: encapsulate Header related logic in this inner class.
+  static class Header {
+    // Format of header is:
+    // 8 bytes - block magic
+    // 4 bytes int - onDiskSizeWithoutHeader
+    // 4 bytes int - uncompressedSizeWithoutHeader
+    // 8 bytes long - prevBlockOffset
+    // The following 3 are only present if header contains checksum information
+    // 1 byte - checksum type
+    // 4 byte int - bytes per checksum
+    // 4 byte int - onDiskDataSizeWithHeader
+    static int BLOCK_MAGIC_INDEX = 0;
+    static int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 8;
+    static int UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX = 12;
+    static int PREV_BLOCK_OFFSET_INDEX = 16;
+    static int CHECKSUM_TYPE_INDEX = 24;
+    static int BYTES_PER_CHECKSUM_INDEX = 25;
+    static int ON_DISK_DATA_SIZE_WITH_HEADER_INDEX = 29;
+  }
+
   /** Type of block. Header field 0. */
   private BlockType blockType;
 
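
The offsets above pin down the fixed part of the v2 block header: 8 bytes of magic, two ints, and a long make 24 bytes, and the optional checksum trio (byte, int, int) brings it to 33 bytes. The standalone sketch below decodes such a header with plain java.nio; the class name, the sample buffer, and the assumption that the checksum variant is present are illustrative only, not part of this patch.

    import java.nio.ByteBuffer;

    // Illustrative decoder for the header layout documented in the Header inner class above.
    public final class HeaderLayoutSketch {
      public static void main(String[] args) {
        // 33 = 8 (magic) + 4 + 4 + 8 + 1 + 4 + 4; the last three fields only exist when the
        // file carries HBase checksum metadata.
        ByteBuffer header = ByteBuffer.allocate(33);
        byte[] magic = new byte[8];
        header.duplicate().get(magic);                 // bytes [0, 8): block type magic
        int onDiskSizeWithoutHeader = header.getInt(8);
        int uncompressedSizeWithoutHeader = header.getInt(12);
        long prevBlockOffset = header.getLong(16);
        byte checksumType = header.get(24);
        int bytesPerChecksum = header.getInt(25);
        int onDiskDataSizeWithHeader = header.getInt(29);
        System.out.printf("onDisk=%d uncompressed=%d prev=%d type=%d bytesPerChecksum=%d dataSize=%d%n",
            onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset,
            checksumType, bytesPerChecksum, onDiskDataSizeWithHeader);
      }
    }
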
@@ -139,7 +161,7 @@ public class HFileBlock implements Cacheable {
    * @see Writer#putHeader(byte[], int, int, int, int)
    */
   private int onDiskDataSizeWithHeader;
-
+  // End of Block Header fields.
 
   /**
    * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by
@@ -170,15 +192,14 @@ public class HFileBlock implements Cacheable {
   private MemoryType memType = MemoryType.EXCLUSIVE;
 
   /**
-   * The on-disk size of the next block, including the header and checksums if present, obtained by
-   * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's
-   * header, or UNSET if unknown.
+   * The on-disk size of the next block, including the header and checksums if present.
+   * UNSET if unknown.
    *
-   * Blocks try to carry the size of the next block to read in this data member. They will even have
-   * this value when served from cache. Could save a seek in the case where we are iterating through
-   * a file and some of the blocks come from cache. If from cache, then having this info to hand
-   * will save us doing a seek to read the header so we can read the body of a block.
-   * TODO: see how effective this is at saving seeks.
+   * Blocks try to carry the size of the next block to read in this data member. Usually
+   * we get block sizes from the hfile index but sometimes the index is not available:
+   * e.g. when we read the indexes themselves (indexes are stored in blocks, we do not
+   * have an index for the indexes). Saves seeks especially around file open when
+   * there is a flurry of reading in hfile metadata.
    */
   private int nextBlockOnDiskSize = UNSET;
 
@@ -198,15 +219,15 @@ public class HFileBlock implements Cacheable {
 
   /**
    * Space for metadata on a block that gets stored along with the block when we cache it.
-   * There are a few bytes stuck on the end of the HFileBlock that we pull in from HDFS (note,
-   * when we read from HDFS, we pull in an HFileBlock AND the header of the next block if one).
-   * 8 bytes are offset of this block (long) in the file. Offset is important because
-   * used when we remake the CacheKey when we return the block to cache when done. There is also
+   * There are a few bytes stuck on the end of the HFileBlock that we pull in from HDFS.
+   * 8 bytes are for the offset of this block (long) in the file. Offset is important because it is
+   * used when we remake the CacheKey when we return the block to the cache when done. There is also
    * a flag on whether checksumming is being done by hbase or not. See class comment for note on
    * uncertain state of checksumming of blocks that come out of cache (should we or should we not?).
-   * Finally there 4 bytes to hold the length of the next block which can save a seek on occasion.
-   * <p>This EXTRA came in with original commit of the bucketcache, HBASE-7404. Was formerly
-   * known as EXTRA_SERIALIZATION_SPACE.
+   * Finally there are 4 bytes to hold the length of the next block which can save a seek on
+   * occasion if available.
+   * (This EXTRA info came in with original commit of the bucketcache, HBASE-7404. It was
+   * formerly known as EXTRA_SERIALIZATION_SPACE).
    */
   static final int BLOCK_METADATA_SPACE = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT;
 
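
BLOCK_METADATA_SPACE works out to 1 + 8 + 4 = 13 bytes. A minimal round-trip of that metadata with a plain ByteBuffer is sketched below; the field order and the names are assumptions made for the sketch, only the sizes and their meaning come from the javadoc above.

    import java.nio.ByteBuffer;

    // Illustrative round-trip of the 13 bytes of cache metadata described above.
    public final class BlockMetadataSketch {
      static final int BLOCK_METADATA_SPACE = Byte.BYTES + Long.BYTES + Integer.BYTES; // 13

      static ByteBuffer pack(boolean usesHBaseChecksum, long offsetInFile, int nextBlockOnDiskSize) {
        ByteBuffer md = ByteBuffer.allocate(BLOCK_METADATA_SPACE);
        md.put(usesHBaseChecksum ? (byte) 1 : (byte) 0); // checksum flag
        md.putLong(offsetInFile);                        // needed to remake the CacheKey
        md.putInt(nextBlockOnDiskSize);                  // can save a seek for the next block
        md.flip();
        return md;
      }

      public static void main(String[] args) {
        ByteBuffer md = pack(true, 4096L, 65_613);
        System.out.println(md.get() + " " + md.getLong() + " " + md.getInt()); // 1 4096 65613
      }
    }
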
@@ -227,7 +248,7 @@ public class HFileBlock implements Cacheable {
    * ++++++++++++++
    * + Checksums + <= Optional
    * ++++++++++++++
-   * + Metadata! +
+   * + Metadata! + <= See note on BLOCK_METADATA_SPACE above.
    * ++++++++++++++
    * </code>
    * @see #serialize(ByteBuffer)
@@ -277,26 +298,6 @@ public class HFileBlock implements Cacheable {
     CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
   }
 
-  // Todo: encapsulate Header related logic in this inner class.
-  static class Header {
-    // Format of header is:
-    // 8 bytes - block magic
-    // 4 bytes int - onDiskSizeWithoutHeader
-    // 4 bytes int - uncompressedSizeWithoutHeader
-    // 8 bytes long - prevBlockOffset
-    // The following 3 are only present if header contains checksum information
-    // 1 byte - checksum type
-    // 4 byte int - bytes per checksum
-    // 4 byte int - onDiskDataSizeWithHeader
-    static int BLOCK_MAGIC_INDEX = 0;
-    static int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 8;
-    static int UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX = 12;
-    static int PREV_BLOCK_OFFSET_INDEX = 16;
-    static int CHECKSUM_TYPE_INDEX = 24;
-    static int BYTES_PER_CHECKSUM_INDEX = 25;
-    static int ON_DISK_DATA_SIZE_WITH_HEADER_INDEX = 29;
-  }
-
   /**
    * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
    */
@@ -308,20 +309,15 @@ public class HFileBlock implements Cacheable {
    * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
    * param.
    */
-  private HFileBlock(HFileBlock that,boolean bufCopy) {
-    this.blockType = that.blockType;
-    this.onDiskSizeWithoutHeader = that.onDiskSizeWithoutHeader;
-    this.uncompressedSizeWithoutHeader = that.uncompressedSizeWithoutHeader;
-    this.prevBlockOffset = that.prevBlockOffset;
+  private HFileBlock(HFileBlock that, boolean bufCopy) {
+    init(that.blockType, that.onDiskSizeWithoutHeader,
+        that.uncompressedSizeWithoutHeader, that.prevBlockOffset,
+        that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize, that.fileContext);
     if (bufCopy) {
       this.buf = new SingleByteBuff(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
     } else {
       this.buf = that.buf.duplicate();
     }
-    this.offset = that.offset;
-    this.onDiskDataSizeWithHeader = that.onDiskDataSizeWithHeader;
-    this.fileContext = that.fileContext;
-    this.nextBlockOnDiskSize = that.nextBlockOnDiskSize;
   }
 
   /**
@@ -418,12 +414,13 @@ public class HFileBlock implements Cacheable {
   }
 
   /**
-   * Parse total ondisk size including header and checksum.
+   * Parse total on disk size including header and checksum.
    * @param headerBuf Header ByteBuffer. Presumed exact size of header.
    * @param verifyChecksum true if checksum verification is in use.
    * @return Size of the block with header included.
    */
-  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf, boolean verifyChecksum) {
+  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf,
+      boolean verifyChecksum) {
     return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) +
       headerSize(verifyChecksum);
   }
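
As a worked example of the method above: the int at header offset 8 is the on-disk size without the header, and the header itself is 24 bytes without the checksum fields or 33 bytes with them (24 + 1 + 4 + 4, as implied by the offsets in the Header class). A small sketch with the sizes hard-coded for illustration; the real code delegates to headerSize(verifyChecksum).

    // Illustrative arithmetic only.
    public final class OnDiskSizeSketch {
      static int headerSize(boolean checksumSupport) {
        return checksumSupport ? 33 : 24;
      }

      public static void main(String[] args) {
        int onDiskSizeWithoutHeader = 65_581; // value read at header offset 8
        System.out.println(onDiskSizeWithoutHeader + headerSize(true));  // 65614
        System.out.println(onDiskSizeWithoutHeader + headerSize(false)); // 65605
      }
    }
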
@@ -433,7 +430,7 @@ public class HFileBlock implements Cacheable {
    * present) read by peeking into the next block's header; use as a hint when doing
    * a read of the next block when scanning or running over a file.
    */
-  public int getNextBlockOnDiskSize() {
+  int getNextBlockOnDiskSize() {
     return nextBlockOnDiskSize;
   }
 
@@ -443,7 +440,7 @@ public class HFileBlock implements Cacheable {
   }
 
   /** @return get data block encoding id that was used to encode this block */
-  public short getDataBlockEncodingId() {
+  short getDataBlockEncodingId() {
     if (blockType != BlockType.ENCODED_DATA) {
       throw new IllegalArgumentException("Querying encoder ID of a block " +
           "of type other than " + BlockType.ENCODED_DATA + ": " + blockType);
@@ -525,6 +522,7 @@ public class HFileBlock implements Cacheable {
     return dup;
   }
 
+  @VisibleForTesting
   private void sanityCheckAssertion(long valueFromBuf, long valueFromField,
       String fieldName) throws IOException {
     if (valueFromBuf != valueFromField) {
@@ -533,6 +531,7 @@ public class HFileBlock implements Cacheable {
     }
   }
 
+  @VisibleForTesting
   private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromField)
       throws IOException {
     if (valueFromBuf != valueFromField) {
@@ -687,7 +686,8 @@ public class HFileBlock implements Cacheable {
   }
 
   /** An additional sanity-check in case no compression or encryption is being used. */
-  public void sanityCheckUncompressedSize() throws IOException {
+  @VisibleForTesting
+  void sanityCheckUncompressedSize() throws IOException {
     if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) {
       throw new IOException("Using no compression but "
           + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
@@ -1326,7 +1326,6 @@ public class HFileBlock implements Cacheable {
 
   /** Something that can be written into a block. */
   interface BlockWritable {
-
     /** The type of block this data should use. */
     BlockType getBlockType();
 
@@ -1339,11 +1338,8 @@ public class HFileBlock implements Cacheable {
     void writeToBlock(DataOutput out) throws IOException;
   }
 
-  // Block readers and writers
-
-  /** An interface allowing to iterate {@link HFileBlock}s. */
+  /** Iterator for {@link HFileBlock}s. */
   interface BlockIterator {
-
     /**
      * Get the next block, or null if there are no more blocks to iterate.
      */
@@ -1356,9 +1352,8 @@ public class HFileBlock implements Cacheable {
     HFileBlock nextBlockWithBlockType(BlockType blockType) throws IOException;
   }
 
-  /** A full-fledged reader with iteration ability. */
+  /** An HFile block reader with iteration ability. */
   interface FSReader {
-
     /**
      * Reads the block at the given offset in the file with the given on-disk
      * size and uncompressed size.
@@ -1375,6 +1370,8 @@ public class HFileBlock implements Cacheable {
      * Creates a block iterator over the given portion of the {@link HFile}.
      * The iterator returns blocks starting with offset such that offset <=
      * startOffset < endOffset. Returned blocks are always unpacked.
+     * Used when no hfile index available; e.g. reading in the hfile index
+     * blocks themselves on file open.
      *
      * @param startOffset the offset of the block to start iteration with
      * @param endOffset the offset to end iteration at (exclusive)
@@ -1406,10 +1403,8 @@ public class HFileBlock implements Cacheable {
    * that comes in here is next in sequence in this block.
    *
    * When we read, we read current block and the next blocks' header. We do this so we have
-   * the length of the next block to read if the hfile index is not available (rare).
-   * TODO: Review!! This trick of reading next blocks header is a pain, complicates our
-   * read path and I don't think it needed given it rare we don't have the block index
-   * (it is 'normally' present, gotten from the hfile index). FIX!!!
+   * the length of the next block to read if the hfile index is not available (rare, at
+   * hfile open only).
    */
   private static class PrefetchedHeader {
     long offset = -1;
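
The comment above is the heart of the prefetch trick: each read pulls in the current block plus the header of the block that follows, so the next block's on-disk size is known even when no index is at hand. A simplified sketch of how that size could be carved out of the over-read buffer follows; the method, the hard-coded 33-byte header size, and the sample numbers are assumptions for illustration, not the class's actual read path.

    import java.nio.ByteBuffer;

    // Illustrative only: recover the next block's on-disk size from a buffer that holds the
    // current block (header, data, checksums) followed by the next block's header.
    public final class NextBlockSizeSketch {
      static final int HDR_SIZE = 33;                         // header size with checksum fields
      static final int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 8; // see the Header offsets earlier

      static int nextBlockOnDiskSize(byte[] onDiskBlock, int curBlockOnDiskSizeWithHeader) {
        // The next block's header starts right where the current block ends.
        int sizeWithoutHeader = ByteBuffer.wrap(onDiskBlock)
            .getInt(curBlockOnDiskSizeWithHeader + ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
        return sizeWithoutHeader + HDR_SIZE;
      }

      public static void main(String[] args) {
        byte[] onDiskBlock = new byte[128 + HDR_SIZE];        // 128-byte current block + next header
        ByteBuffer.wrap(onDiskBlock).putInt(128 + ON_DISK_SIZE_WITHOUT_HEADER_INDEX, 200);
        System.out.println(nextBlockOnDiskSize(onDiskBlock, 128)); // 233
      }
    }
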
@@ -1423,7 +1418,7 @@ public class HFileBlock implements Cacheable {
   }
 
   /**
-   * Reads version 2 blocks from the filesystem.
+   * Reads version 2 HFile blocks from the filesystem.
    */
   static class FSReaderImpl implements FSReader {
     /** The file system stream of the underlying {@link HFile} that
@@ -1711,15 +1706,15 @@ public class HFileBlock implements Cacheable {
     /**
      * Reads a version 2 block.
      *
-     * @param offset the offset in the stream to read at. Usually the
+     * @param offset the offset in the stream to read at.
      * @param onDiskSizeWithHeaderL the on-disk size of the block, including
      * the header and checksums if present or -1 if unknown (as a long). Can be -1
      * if we are doing raw iteration of blocks as when loading up file metadata; i.e.
-     * the first read of a new file (TODO: Fix! See HBASE-17072). Usually non-null gotten
-     * from the file index.
+     * the first read of a new file. Usually non-null gotten from the file index.
      * @param pread whether to use a positional read
      * @param verifyChecksum Whether to use HBase checksums.
-     * If HBase checksum is switched off, then use HDFS checksum.
+     * If HBase checksum is switched off, then use HDFS checksum. Can also flip on/off
+     * reading same file if we hit a troublesome patch in an hfile.
      * @return the HFileBlock or null if there is a HBase checksum mismatch
      */
     @VisibleForTesting
@@ -1740,14 +1735,18 @@ public class HFileBlock implements Cacheable {
             ", pread=" + pread + ", verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
             headerBuf + ", onDiskSizeWithHeader=" + onDiskSizeWithHeader);
       }
+      // This is NOT same as verifyChecksum. This latter is whether to do hbase
+      // checksums. Can change with circumstances. The below flag is whether the
+      // file has support for checksums (version 2+).
+      boolean checksumSupport = this.fileContext.isUseHBaseChecksum();
       long startTime = System.currentTimeMillis();
       if (onDiskSizeWithHeader <= 0) {
-        // We were not passed the block size. Need to get it from the header. If header was not in
-        // cache, need to seek to pull it in. This is costly and should happen very rarely.
-        // Currently happens on open of a hfile reader where we read the trailer blocks for
-        // indices. Otherwise, we are reading block sizes out of the hfile index. To check,
-        // enable TRACE in this file and you'll get an exception in a LOG every time we seek.
-        // See HBASE-17072 for more detail.
+        // We were not passed the block size. Need to get it from the header. If header was
+        // not cached (see getCachedHeader above), need to seek to pull it in. This is costly
+        // and should happen very rarely. Currently happens on open of a hfile reader where we
+        // read the trailer blocks to pull in the indices. Otherwise, we are reading block sizes
+        // out of the hfile index. To check, enable TRACE in this file and you'll get an exception
+        // in a LOG every time we seek. See HBASE-17072 for more detail.
         if (headerBuf == null) {
           if (LOG.isTraceEnabled()) {
             LOG.trace("Extra see to get block size!", new RuntimeException());
@@ -1756,8 +1755,7 @@ public class HFileBlock implements Cacheable {
           readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
               offset, pread);
         }
-        onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf,
-            this.fileContext.isUseHBaseChecksum());
+        onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
       }
       int preReadHeaderSize = headerBuf == null? 0 : hdrSize;
       // Allocate enough space to fit the next block's header too; saves a seek next time through.
@@ -1765,8 +1763,7 @@ public class HFileBlock implements Cacheable {
       // onDiskSizeWithHeader is header, body, and any checksums if present. preReadHeaderSize
       // says where to start reading. If we have the header cached, then we don't need to read
       // it again and we can likely read from last place we left off w/o need to backup and reread
-      // the header we read last time through here. TODO: Review this overread of the header. Is it necessary
-      // when we get the block size from the hfile index? See note on PrefetchedHeader class above.
+      // the header we read last time through here.
       // TODO: Make this ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
       byte [] onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize];
       int nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, preReadHeaderSize,
@@ -1780,8 +1777,7 @@ public class HFileBlock implements Cacheable {
       }
       // Do a few checks before we go instantiate HFileBlock.
       assert onDiskSizeWithHeader > this.hdrSize;
-      verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset,
-          this.fileContext.isUseHBaseChecksum());
+      verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, checksumSupport);
       ByteBuffer onDiskBlockByteBuffer = ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
       // Verify checksum of the data before using it for building HFileBlock.
       if (verifyChecksum &&
@@ -1796,9 +1792,8 @@ public class HFileBlock implements Cacheable {
       // If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
       // contains the header of next block, so no need to set next block's header in it.
       HFileBlock hFileBlock =
-          new HFileBlock(new SingleByteBuff(onDiskBlockByteBuffer),
-              this.fileContext.isUseHBaseChecksum(), MemoryType.EXCLUSIVE, offset,
-              nextBlockOnDiskSize, fileContext);
+          new HFileBlock(new SingleByteBuff(onDiskBlockByteBuffer), checksumSupport,
+              MemoryType.EXCLUSIVE, offset, nextBlockOnDiskSize, fileContext);
       // Run check on uncompressed sizings.
       if (!fileContext.isCompressedOrEncrypted()) {
         hFileBlock.sanityCheckUncompressed();
@@ -1877,6 +1872,7 @@ public class HFileBlock implements Cacheable {
   }
 
   /** An additional sanity-check in case no compression or encryption is being used. */
+  @VisibleForTesting
   void sanityCheckUncompressed() throws IOException {
     if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader +
         totalChecksumBytes()) {
@@ -1991,13 +1987,14 @@ public class HFileBlock implements Cacheable {
     return true;
   }
 
-  public DataBlockEncoding getDataBlockEncoding() {
+  DataBlockEncoding getDataBlockEncoding() {
     if (blockType == BlockType.ENCODED_DATA) {
       return DataBlockEncoding.getEncodingById(getDataBlockEncodingId());
     }
     return DataBlockEncoding.NONE;
   }
 
+  @VisibleForTesting
   byte getChecksumType() {
     return this.fileContext.getChecksumType().getCode();
   }
@@ -2007,6 +2004,7 @@ public class HFileBlock implements Cacheable {
   }
 
   /** @return the size of data on disk + header. Excludes checksum. */
+  @VisibleForTesting
  int getOnDiskDataSizeWithHeader() {
    return this.onDiskDataSizeWithHeader;
  }
@@ -2045,6 +2043,8 @@ public class HFileBlock implements Cacheable {
   /**
    * Return the appropriate DUMMY_HEADER for the minor version
    */
+  @VisibleForTesting
+  // TODO: Why is this in here?
   byte[] getDummyHeaderForVersion() {
     return getDummyHeaderForVersion(this.fileContext.isUseHBaseChecksum());
   }