HBASE-1818 HFile code review and refinement
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@813138 13f79535-47bb-0310-9956-ffa450edef68
parent 9e11a346df
commit e78ff42412
@@ -34,6 +34,7 @@ Release 0.21.0 - Unreleased
    HBASE-1800  Too many ZK connections
    HBASE-1819  Update to 0.20.1 hadoop and zk 3.2.1
    HBASE-1820  Update jruby from 1.2 to 1.3.1
+   HBASE-1818  HFile code review and refinement (Schubert Zhang via Stack)
 
 OPTIMIZATIONS
 
@@ -168,7 +168,7 @@ public class HFile {
     protected String name;
 
     // Total uncompressed bytes, maybe calculate a compression ratio later.
-    private int totalBytes = 0;
+    private long totalBytes = 0;
 
     // Total # of key/value entries, ie: how many times add() was called.
     private int entryCount = 0;
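A note on the type change above: totalBytes accumulates uncompressed bytes across the whole file, and an int wraps negative once the total passes 2^31 - 1 bytes (about 2 GiB), a size an HFile can plausibly reach. A self-contained sketch of the wraparound (the values are illustrative):

public class ByteCounterOverflow {
  public static void main(String[] args) {
    int intTotal = Integer.MAX_VALUE;   // ~2 GiB already accounted for
    intTotal += 1024;                   // adding the next 1 KiB wraps negative
    System.out.println(intTotal);       // -2147482625

    long longTotal = Integer.MAX_VALUE; // same history with the widened counter
    longTotal += 1024;
    System.out.println(longTotal);      // 2147484671
  }
}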
@@ -320,13 +320,12 @@ public class HFile {
      */
     private void finishBlock() throws IOException {
       if (this.out == null) return;
-      long size = releaseCompressingStream(this.out);
+      int size = releaseCompressingStream(this.out);
       this.out = null;
       blockKeys.add(firstKey);
-      int written = longToInt(size);
       blockOffsets.add(Long.valueOf(blockBegin));
-      blockDataSizes.add(Integer.valueOf(written));
-      this.totalBytes += written;
+      blockDataSizes.add(Integer.valueOf(size));
+      this.totalBytes += size;
     }
 
     /*
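Together with the previous hunk, the sizing convention becomes: a single block's on-disk size fits in an int, so releaseCompressingStream can return int directly and the intermediate longToInt(size) call disappears, while only the file-wide total needs a long. A small sketch of that invariant (names and numbers are invented for illustration):

public class BlockSizeInvariant {
  public static void main(String[] args) {
    int[] blockDataSizes = {65_536, 64_123, 66_002}; // per-block sizes fit in ints
    long totalBytes = 0L;                            // file-wide total is a long
    for (int size : blockDataSizes) {
      totalBytes += size; // widening add; safe even when the sum passes 2 GiB
    }
    System.out.println(totalBytes); // 195661
  }
}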
@@ -335,10 +334,10 @@ public class HFile {
      */
     private void newBlock() throws IOException {
       // This is where the next block begins.
-      blockBegin = outputStream.getPos();
+      this.blockBegin = outputStream.getPos();
       this.out = getCompressingStream();
       this.out.write(DATABLOCKMAGIC);
-      firstKey = null;
+      this.firstKey = null;
     }
 
     /*
@@ -513,7 +512,7 @@ public class HFile {
       }
       if (this.lastKeyBuffer != null) {
         if (this.comparator.compare(this.lastKeyBuffer, this.lastKeyOffset,
-            this.lastKeyLength, key, offset, length) > 0) {
+            this.lastKeyLength, key, offset, length) >= 0) {
           throw new IOException("Added a key not lexically larger than" +
             " previous key=" + Bytes.toString(key, offset, length) +
             ", lastkey=" + Bytes.toString(this.lastKeyBuffer, this.lastKeyOffset,
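The comparison above tightens from > 0 to >= 0: a key equal to the previous key is now rejected along with out-of-order keys, so appended keys must be strictly increasing. A minimal sketch of the rule, with java.util.Arrays.compare standing in for the configured key comparator (which this diff does not show):

import java.io.IOException;
import java.util.Arrays;

public class StrictKeyOrder {
  static void check(byte[] lastKey, byte[] key) throws IOException {
    // '>= 0' means equal keys fail the check too.
    if (lastKey != null && Arrays.compare(lastKey, key) >= 0) {
      throw new IOException("Added a key not lexically larger than previous key");
    }
  }

  public static void main(String[] args) throws IOException {
    check("row-1".getBytes(), "row-2".getBytes()); // ok: strictly larger
    check("row-2".getBytes(), "row-2".getBytes()); // throws: duplicate now rejected
  }
}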
@@ -620,7 +619,7 @@ public class HFile {
       appendFileInfo(this.fileinfo, FileInfo.AVG_KEY_LEN,
         Bytes.toBytes(avgKeyLen), false);
       int avgValueLen = this.entryCount == 0? 0:
-        (int)(this.keylength/this.entryCount);
+        (int)(this.valuelength/this.entryCount);
       appendFileInfo(this.fileinfo, FileInfo.AVG_VALUE_LEN,
         Bytes.toBytes(avgValueLen), false);
       appendFileInfo(this.fileinfo, FileInfo.COMPARATOR,
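The hunk above fixes a copy-paste bug: AVG_VALUE_LEN was derived from the accumulated key bytes, so the stored average value length silently mirrored the average key length. A worked example with invented totals:

public class AvgLenFix {
  public static void main(String[] args) {
    long keylength = 2_000;    // total key bytes written
    long valuelength = 50_000; // total value bytes written
    int entryCount = 100;
    int avgKeyLen = (int)(keylength / entryCount);     // 20
    int avgValueLen = (int)(valuelength / entryCount); // 500; the old code reported 20
    System.out.println(avgKeyLen + " " + avgValueLen);
  }
}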
@@ -860,7 +859,7 @@ public class HFile {
       if (trailer.metaIndexCount == 0) {
         return null; // there are no meta blocks
       }
-      if (metaIndex == null) {
+      if ((metaIndex == null) || (metaIndex.count == 0)) {
         throw new IOException("Meta index not loaded");
       }
       byte [] mbname = Bytes.toBytes(metaBlockName);
@@ -876,16 +875,14 @@ public class HFile {
 
       ByteBuffer buf = decompress(metaIndex.blockOffsets[block],
         longToInt(blockSize), metaIndex.blockDataSizes[block]);
+      if (buf == null)
+        return null;
       byte [] magic = new byte[METABLOCKMAGIC.length];
       buf.get(magic, 0, magic.length);
 
       if (! Arrays.equals(magic, METABLOCKMAGIC)) {
         throw new IOException("Meta magic is bad in block " + block);
       }
-      // Toss the header. May have to remove later due to performance.
-      buf.compact();
-      buf.limit(buf.limit() - METABLOCKMAGIC.length);
-      buf.rewind();
       return buf;
     }
     /**
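Two things change in this meta-block read path: a null result from decompress is now handled instead of falling straight into buf.get, and the compact/limit/rewind sequence that physically copied the payload over the magic header is gone, saving a memory move per block. The header bytes stay in the returned buffer, and consumers step over them by position, as the scanner hunks below do with block.position(DATABLOCKMAGIC.length). A minimal ByteBuffer sketch of the old and new approaches (the magic constant here is illustrative, not the real one):

import java.nio.ByteBuffer;

public class HeaderSkip {
  static final byte[] MAGIC = {'M', 'E', 'T', 'A'}; // illustrative stand-in

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[] {'M', 'E', 'T', 'A', 1, 2, 3});
    buf.position(MAGIC.length); // as if the magic had just been read and verified

    // Old approach: physically drop the header with an array copy.
    //   buf.compact(); buf.limit(buf.limit() - MAGIC.length); buf.rewind();

    // New approach: leave the bytes in place; readers start after the header.
    System.out.println(buf.get()); // 1, the first payload byte
  }
}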
@@ -898,7 +895,7 @@ public class HFile {
       if (blockIndex == null) {
         throw new IOException("Block index not loaded");
       }
-      if (block < 0 || block > blockIndex.count) {
+      if (block < 0 || block >= blockIndex.count) {
         throw new IOException("Requested block is out of range: " + block +
           ", max: " + blockIndex.count);
       }
@@ -935,16 +932,15 @@ public class HFile {
       }
       ByteBuffer buf = decompress(blockIndex.blockOffsets[block],
         longToInt(onDiskBlockSize), this.blockIndex.blockDataSizes[block]);
+      if (buf == null) {
+        throw new IOException("Decompress block failure " + block);
+      }
 
       byte [] magic = new byte[DATABLOCKMAGIC.length];
       buf.get(magic, 0, magic.length);
       if (!Arrays.equals(magic, DATABLOCKMAGIC)) {
         throw new IOException("Data magic is bad in block " + block);
       }
-      // Toss the header. May have to remove later due to performance.
-      buf.compact();
-      buf.limit(buf.limit() - DATABLOCKMAGIC.length);
-      buf.rewind();
 
       // Cache the block
       if(cacheBlock && cache != null) {
@@ -1249,8 +1245,10 @@ public class HFile {
       }
       if (block != null && currBlock == 0) {
         block.rewind();
+        block.position(DATABLOCKMAGIC.length);
         currKeyLen = block.getInt();
         currValueLen = block.getInt();
+        return true;
       }
       currBlock = 0;
       block = reader.readBlock(currBlock, cacheBlocks);
@@ -1273,6 +1271,7 @@ public class HFile {
       } else {
         // we are already in the same block, just rewind to seek again.
         block.rewind();
+        block.position(DATABLOCKMAGIC.length);
       }
     }
   }
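These two scanner hunks are the consumer side of the header change above: because readBlock no longer compacts the magic away, every rewind is followed by block.position(DATABLOCKMAGIC.length) so decoding starts at the first key length. The added return true also lets the seek exit once a cached first block has been repositioned, instead of falling through to re-read block 0. Note the asymmetry kept between the two read paths: a missing meta block yields null, while a data block that fails to decompress throws.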
@@ -1366,7 +1365,7 @@ public class HFile {
   }
 
   /*
-   * The block index for a RFile.
+   * The block index for a HFile.
    * Used reading.
    */
   static class BlockIndex implements HeapSize {