HBASE-15238 HFileReaderV2 prefetch overreaches; runs off the end of the data
commit 869c0cf43c
parent 01b73e9877
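The prefetch-on-open loop computed its end bound as fileSize minus the
trailer size, which reaches past the data blocks into the load-on-open
section (root index, file info, etc.), so prefetch could run off the end
of the data. Bound the loop at the trailer's loadOnOpenDataOffset
instead. Also thread the block offset into checksum validation and
normalize the related log messages so diagnostics report where in the
file a problem occurred.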
ChecksumUtil.java

@@ -86,7 +86,7 @@ public class ChecksumUtil {
    * The header is extracted from the specified HFileBlock while the
    * data-to-be-verified is extracted from 'data'.
    */
-  static boolean validateBlockChecksum(String pathName, HFileBlock block,
+  static boolean validateBlockChecksum(String pathName, long offset, HFileBlock block,
     byte[] data, int hdrSize) throws IOException {

     // If this is an older version of the block that does not have
@@ -116,12 +116,13 @@ public class ChecksumUtil {
     assert dataChecksum != null;
     int sizeWithHeader = block.getOnDiskDataSizeWithHeader();
     if (LOG.isTraceEnabled()) {
-      LOG.info("length of data = " + data.length
-          + " OnDiskDataSizeWithHeader = " + sizeWithHeader
-          + " checksum type = " + cktype.getName()
-          + " file =" + pathName
-          + " header size = " + hdrSize
-          + " bytesPerChecksum = " + bytesPerChecksum);
+      LOG.info("dataLength=" + data.length
+          + ", sizeWithHeader=" + sizeWithHeader
+          + ", checksumType=" + cktype.getName()
+          + ", file=" + pathName
+          + ", offset=" + offset
+          + ", headerSize=" + hdrSize
+          + ", bytesPerChecksum=" + bytesPerChecksum);
     }
     try {
       dataChecksum.verifyChunkedSums(ByteBuffer.wrap(data, 0, sizeWithHeader),
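For context, not part of the patch: ChecksumUtil delegates the actual chunked
verification to Hadoop's DataChecksum. Below is a minimal, self-contained
sketch of that API, assuming hadoop-common on the classpath; the class name,
sizes, and file name are illustrative.

import java.nio.ByteBuffer;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.util.DataChecksum;

public class ChunkedChecksumSketch {
  public static void main(String[] args) {
    int bytesPerChecksum = 16 * 1024;
    DataChecksum sum = DataChecksum.newDataChecksum(
        DataChecksum.Type.CRC32C, bytesPerChecksum);

    byte[] data = new byte[64 * 1024]; // stand-in for a block's on-disk bytes
    ByteBuffer checksums = ByteBuffer.allocate(
        (data.length / bytesPerChecksum) * sum.getChecksumSize());
    sum.calculateChunkedSums(ByteBuffer.wrap(data), checksums);

    try {
      // basePos is where the data starts in the file; HBASE-15238 threads the
      // block offset through so a failure can be tied to a file position.
      sum.verifyChunkedSums(ByteBuffer.wrap(data), checksums, "example-hfile", 0);
      System.out.println("checksums verified");
    } catch (ChecksumException e) {
      // A mismatch lands here; HBase turns it into a false return and the
      // caller re-reads the block using filesystem-level checksums.
    }
  }
}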
FixedFileTrailer.java

@@ -41,8 +41,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * trailer size is fixed within a given {@link HFile} format version only, but
  * we always store the version number as the last four-byte integer of the file.
  * The version number itself is split into two portions, a major
- * version and a minor version.
- * The last three bytes of a file is the major
+ * version and a minor version. The last three bytes of a file are the major
  * version and a single preceding byte is the minor number. The major version
  * determines which readers/writers to use to read/write a hfile while a minor
  * version determines smaller changes in hfile format that do not need a new
@@ -50,7 +49,6 @@ import org.apache.hadoop.hbase.util.Bytes;
  */
 @InterfaceAudience.Private
 public class FixedFileTrailer {
-
   /**
    * We store the comparator class name as a fixed-length field in the trailer.
    */
@@ -65,8 +63,9 @@ public class FixedFileTrailer {
   /**
    * In version 1, the offset to the data block index. Starting from version 2,
    * the meaning of this field is the offset to the section of the file that
-   * should be loaded at the time the file is being opened, and as of the time
-   * of writing, this happens to be the offset of the file info section.
+   * should be loaded at the time the file is being opened: i.e. on open we load
+   * the root index, file info, etc. See http://hbase.apache.org/book.html#_hfile_format_2
+   * in the reference guide.
    */
   private long loadOnOpenDataOffset;
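The corrected javadoc (last three bytes are the major version, the single
preceding byte the minor) corresponds to packing both numbers into the
trailing four-byte big-endian int. A sketch of that packing follows; the
helper names are illustrative, not HBase's actual methods.

public class TrailerVersionSketch {
  // Minor version in the high byte, major version in the low three bytes:
  // written big-endian, the file's last three bytes hold the major version.
  static int serialize(int majorVersion, int minorVersion) {
    return (majorVersion & 0x00ffffff) | (minorVersion << 24);
  }

  static int extractMajor(int serialized) {
    return serialized & 0x00ffffff;
  }

  static int extractMinor(int serialized) {
    return serialized >>> 24;
  }

  public static void main(String[] args) {
    int v = serialize(2, 3); // e.g. HFile major version 2, minor version 3
    System.out.printf("serialized=0x%08x major=%d minor=%d%n",
        v, extractMajor(v), extractMinor(v));
  }
}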
HFileBlock.java

@@ -1695,7 +1695,7 @@ public class HFileBlock implements Cacheable {
         b.assumeUncompressed();
       }

-      if (verifyChecksum && !validateBlockChecksum(b, onDiskBlock, hdrSize)) {
+      if (verifyChecksum && !validateBlockChecksum(b, offset, onDiskBlock, hdrSize)) {
         return null; // checksum mismatch
       }

@@ -1744,9 +1744,10 @@ public class HFileBlock implements Cacheable {
      * If there is a checksum mismatch, then return false. Otherwise
      * return true.
      */
-    protected boolean validateBlockChecksum(HFileBlock block, byte[] data, int hdrSize)
-        throws IOException {
-      return ChecksumUtil.validateBlockChecksum(pathName, block, data, hdrSize);
+    protected boolean validateBlockChecksum(HFileBlock block, long offset, byte[] data,
+        int hdrSize)
+        throws IOException {
+      return ChecksumUtil.validateBlockChecksum(pathName, offset, block, data, hdrSize);
     }

     @Override
HFileReaderV2.java

@@ -189,10 +189,14 @@ public class HFileReaderV2 extends AbstractHFileReader {
     if (cacheConf.shouldPrefetchOnOpen()) {
       PrefetchExecutor.request(path, new Runnable() {
         public void run() {
+          long offset = 0;
+          long end = 0;
           try {
-            long offset = 0;
-            long end = fileSize - getTrailer().getTrailerSize();
+            end = getTrailer().getLoadOnOpenDataOffset();
             HFileBlock prevBlock = null;
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("File=" + path.toString() + ", offset=" + offset + ", end=" + end);
+            }
             while (offset < end) {
               if (Thread.interrupted()) {
                 break;
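Not part of the patch: the arithmetic below illustrates why the old bound in
the hunk above overreached. All offsets are hypothetical.

public class PrefetchBoundSketch {
  public static void main(String[] args) {
    // HFile v2 layout by ascending offset:
    //   [data blocks ...][load-on-open: root index, file info ...][trailer]
    long fileSize = 1_000_000L;           // hypothetical file size
    long trailerSize = 212L;              // hypothetical trailer size
    long loadOnOpenDataOffset = 900_000L; // hypothetical start of load-on-open

    long oldEnd = fileSize - trailerSize; // 999788: inside the load-on-open section
    long newEnd = loadOnOpenDataOffset;   // 900000: end of the data blocks proper

    // With the old bound, the prefetch loop tried to parse index/file-info
    // bytes as data blocks and ran off the end of the data (HBASE-15238).
    System.out.println("oldEnd=" + oldEnd + ", newEnd=" + newEnd);
  }
}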
@@ -209,11 +213,11 @@ public class HFileReaderV2 extends AbstractHFileReader {
           } catch (IOException e) {
             // IOExceptions are probably due to region closes (relocation, etc.)
             if (LOG.isTraceEnabled()) {
-              LOG.trace("Exception encountered while prefetching " + path + ":", e);
+              LOG.trace("File=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
             }
           } catch (Exception e) {
             // Other exceptions are interesting
-            LOG.warn("Exception encountered while prefetching " + path + ":", e);
+            LOG.warn("File=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
           } finally {
             PrefetchExecutor.complete(path);
           }
@@ -380,9 +384,11 @@ public class HFileReaderV2 extends AbstractHFileReader {
     if (dataBlockIndexReader == null) {
       throw new IOException("Block index not loaded");
     }
-    if (dataBlockOffset < 0 || dataBlockOffset >= trailer.getLoadOnOpenDataOffset()) {
+    long trailerOffset = trailer.getLoadOnOpenDataOffset();
+    if (dataBlockOffset < 0 || dataBlockOffset >= trailerOffset) {
       throw new IOException("Requested block is out of range: " + dataBlockOffset +
-        ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset());
+        ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset() +
+        ", trailer.getLoadOnOpenDataOffset: " + trailerOffset);
     }

     // For any given block from any given file, synchronize reads for said block.
TestChecksum.java

@@ -348,7 +348,7 @@ public class TestChecksum {
     }

     @Override
-    protected boolean validateBlockChecksum(HFileBlock block,
+    protected boolean validateBlockChecksum(HFileBlock block, long offset,
       byte[] data, int hdrSize) throws IOException {
       return false; // checksum validation failure
     }
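The test override works because validateBlockChecksum is a protected instance
method: a subclass can force a mismatch to exercise the fallback read path. A
self-contained sketch of that hook pattern follows; class names are
illustrative, not HBase's.

import java.io.IOException;

class ReaderSketch {
  // The real method delegates to ChecksumUtil with pathName, offset, data,
  // and header size.
  protected boolean validateBlockChecksum(long offset, byte[] data, int hdrSize)
      throws IOException {
    return true;
  }

  byte[] readBlock(long offset, byte[] onDiskBytes, int hdrSize) throws IOException {
    if (!validateBlockChecksum(offset, onDiskBytes, hdrSize)) {
      return null; // mismatch: caller re-reads with filesystem-level checksums
    }
    return onDiskBytes;
  }
}

public class FailingReaderSketch extends ReaderSketch {
  @Override
  protected boolean validateBlockChecksum(long offset, byte[] data, int hdrSize) {
    return false; // force the mismatch path, like the test override above
  }

  public static void main(String[] args) throws IOException {
    byte[] result = new FailingReaderSketch().readBlock(0L, new byte[128], 33);
    System.out.println(result == null ? "checksum mismatch path taken" : "ok");
  }
}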