HBASE-1818 HFile code review and refinement -- reversing patch... doesn't pass tests
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@813158 13f79535-47bb-0310-9956-ffa450edef68
parent e78ff42412
commit 0593245163
@@ -34,7 +34,6 @@ Release 0.21.0 - Unreleased
    HBASE-1800  Too many ZK connections
    HBASE-1819  Update to 0.20.1 hadoop and zk 3.2.1
    HBASE-1820  Update jruby from 1.2 to 1.3.1
-   HBASE-1818  HFile code review and refinement (Shubert Zhang via Stack)
 
   OPTIMIZATIONS
 
@@ -168,7 +168,7 @@ public class HFile {
     protected String name;
 
     // Total uncompressed bytes, maybe calculate a compression ratio later.
-    private long totalBytes = 0;
+    private int totalBytes = 0;
 
     // Total # of key/value entries, ie: how many times add() was called.
     private int entryCount = 0;
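Side note on the long vs. int choice here: the reversed patch had widened totalBytes to a long, presumably because an int counter wraps once more than Integer.MAX_VALUE (about 2 GiB) of uncompressed data passes through one writer. A minimal, self-contained sketch of that wrap (the rationale is an assumption, not stated in the commit):

public class IntCounterWrap {
  public static void main(String[] args) {
    int totalBytes = Integer.MAX_VALUE; // ~2 GiB already accounted for
    totalBytes += 64 * 1024;            // add one more 64 KiB block
    System.out.println(totalBytes);     // negative: the int counter wrapped
  }
}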
@@ -320,12 +320,13 @@ public class HFile {
      */
     private void finishBlock() throws IOException {
       if (this.out == null) return;
-      int size = releaseCompressingStream(this.out);
+      long size = releaseCompressingStream(this.out);
       this.out = null;
       blockKeys.add(firstKey);
+      int written = longToInt(size);
       blockOffsets.add(Long.valueOf(blockBegin));
-      blockDataSizes.add(Integer.valueOf(size));
-      this.totalBytes += size;
+      blockDataSizes.add(Integer.valueOf(written));
+      this.totalBytes += written;
     }
 
     /*
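The restored finishBlock() takes the long returned by releaseCompressingStream() and narrows it through a longToInt() helper before recording the block size. The helper is defined elsewhere in HFile.java; a hedged sketch of its likely shape, shown only so the hunk reads standalone:

// Assumed shape of HFile's longToInt helper: narrows a size expected to
// fit in 32 bits by keeping only the low-order four bytes.
static int longToInt(final long l) {
  return (int)(l & 0x00000000ffffffffL);
}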
@@ -334,10 +335,10 @@ public class HFile {
      */
     private void newBlock() throws IOException {
       // This is where the next block begins.
-      this.blockBegin = outputStream.getPos();
+      blockBegin = outputStream.getPos();
       this.out = getCompressingStream();
       this.out.write(DATABLOCKMAGIC);
-      this.firstKey = null;
+      firstKey = null;
     }
 
     /*
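As newBlock() shows, every data block starts with the DATABLOCKMAGIC bytes and is followed by key/value records. A self-contained sketch of that layout (the magic value and the record framing are inferred from this diff, not quoted from HFile.java):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class NewBlockSketch {
  // Stand-in for HFile's DATABLOCKMAGIC constant (8 bytes assumed).
  static final byte[] DATABLOCKMAGIC = "DATABLK*".getBytes();

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.write(DATABLOCKMAGIC);   // block header, as written by newBlock()
    out.writeInt(3);             // key length
    out.writeInt(2);             // value length
    out.write("row".getBytes()); // key bytes
    out.write("v1".getBytes());  // value bytes
    System.out.println("block bytes: " + bytes.size()); // 8 + 4 + 4 + 3 + 2 = 21
  }
}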
@@ -512,7 +513,7 @@ public class HFile {
       }
       if (this.lastKeyBuffer != null) {
         if (this.comparator.compare(this.lastKeyBuffer, this.lastKeyOffset,
-            this.lastKeyLength, key, offset, length) >= 0) {
+            this.lastKeyLength, key, offset, length) > 0) {
           throw new IOException("Added a key not lexically larger than" +
             " previous key=" + Bytes.toString(key, offset, length) +
             ", lastkey=" + Bytes.toString(this.lastKeyBuffer, this.lastKeyOffset,
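The one-character change matters: with `>= 0` (the patch) appending a key equal to the previous key is rejected; with `> 0` (restored) duplicates are allowed and only strictly out-of-order keys throw. A toy illustration, using plain lexicographic byte comparison as a stand-in for this.comparator (Arrays.compare on byte arrays needs Java 9+):

import java.util.Arrays;

public class KeyOrderCheck {
  public static void main(String[] args) {
    byte[] lastKey = "row-1".getBytes();
    byte[] key     = "row-1".getBytes();    // duplicate of the previous key
    int cmp = Arrays.compare(lastKey, key); // 0: the arrays are equal
    System.out.println("patched  check (cmp >= 0) rejects: " + (cmp >= 0)); // true
    System.out.println("restored check (cmp >  0) rejects: " + (cmp >  0)); // false
  }
}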
@@ -619,7 +620,7 @@ public class HFile {
       appendFileInfo(this.fileinfo, FileInfo.AVG_KEY_LEN,
         Bytes.toBytes(avgKeyLen), false);
       int avgValueLen = this.entryCount == 0? 0:
-        (int)(this.valuelength/this.entryCount);
+        (int)(this.keylength/this.entryCount);
       appendFileInfo(this.fileinfo, FileInfo.AVG_VALUE_LEN,
         Bytes.toBytes(avgValueLen), false);
       appendFileInfo(this.fileinfo, FileInfo.COMPARATOR,
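Worth flagging: the restored line divides this.keylength by entryCount yet stores the result as AVG_VALUE_LEN, which looks like the copy-paste slip the reversed patch had corrected; reverting wholesale brings it back. The intended arithmetic, sketched with made-up running totals:

public class AvgLenSketch {
  public static void main(String[] args) {
    long keylength = 120, valuelength = 4_000; // hypothetical running totals
    int entryCount = 40;
    int avgKeyLen   = entryCount == 0 ? 0 : (int)(keylength / entryCount);   // 3
    int avgValueLen = entryCount == 0 ? 0 : (int)(valuelength / entryCount); // 100
    System.out.println(avgKeyLen + " / " + avgValueLen);
  }
}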
@@ -859,7 +860,7 @@ public class HFile {
       if (trailer.metaIndexCount == 0) {
         return null; // there are no meta blocks
       }
-      if ((metaIndex == null) || (metaIndex.count == 0)) {
+      if (metaIndex == null) {
         throw new IOException("Meta index not loaded");
       }
       byte [] mbname = Bytes.toBytes(metaBlockName);
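The dropped `metaIndex.count == 0` clause was mostly belt-and-braces: the preceding `trailer.metaIndexCount == 0` test already returns null when the file has no meta blocks, so the extra clause only caught a trailer/index mismatch. A compact illustration of the two guards (all names are stand-ins):

public class MetaIndexGuard {
  public static void main(String[] args) {
    Object metaIndex = new Object(); // index object loaded...
    int count = 0;                   // ...but inconsistently empty
    boolean patchedThrows  = (metaIndex == null) || (count == 0); // true
    boolean restoredThrows = (metaIndex == null);                 // false
    System.out.println(patchedThrows + " vs " + restoredThrows);
  }
}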
@@ -875,14 +876,16 @@ public class HFile {
 
       ByteBuffer buf = decompress(metaIndex.blockOffsets[block],
         longToInt(blockSize), metaIndex.blockDataSizes[block]);
+      if (buf == null)
+        return null;
       byte [] magic = new byte[METABLOCKMAGIC.length];
       buf.get(magic, 0, magic.length);
 
       if (! Arrays.equals(magic, METABLOCKMAGIC)) {
         throw new IOException("Meta magic is bad in block " + block);
       }
       // Toss the header. May have to remove later due to performance.
       buf.compact();
       buf.limit(buf.limit() - METABLOCKMAGIC.length);
       buf.rewind();
       return buf;
     }
     /**
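The compact()/limit()/rewind() sequence is the "toss the header" trick: it slides the payload to the front of the buffer and trims the stale tail, leaving a buffer that starts at the first post-magic byte. A self-contained demonstration (the 8-byte magic string is a stand-in):

import java.nio.ByteBuffer;

public class TossHeaderDemo {
  public static void main(String[] args) {
    byte[] magic = "METABLKc".getBytes();      // stand-in 8-byte magic
    ByteBuffer buf = ByteBuffer.allocate(magic.length + 4);
    buf.put(magic).putInt(42);                 // header followed by payload
    buf.flip();

    buf.get(new byte[magic.length]);           // read (and discard) the magic
    buf.compact();                             // copy the payload to offset 0
    buf.limit(buf.limit() - magic.length);     // trim the stale tail bytes
    buf.rewind();                              // position back to the start
    System.out.println(buf.getInt());          // 42: payload, header tossed
  }
}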
@@ -895,7 +898,7 @@ public class HFile {
       if (blockIndex == null) {
         throw new IOException("Block index not loaded");
       }
-      if (block < 0 || block >= blockIndex.count) {
+      if (block < 0 || block > blockIndex.count) {
         throw new IOException("Requested block is out of range: " + block +
           ", max: " + blockIndex.count);
       }
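Another semantic revert: data blocks are indexed 0..count-1, so the patched `>= blockIndex.count` catches a request for block == count, while the restored `>` lets it slip through to fail later. Illustration:

public class RangeCheckDemo {
  public static void main(String[] args) {
    int count = 10;  // valid block indices are 0..9
    int block = 10;  // one past the end
    System.out.println("patched  rejects: " + (block < 0 || block >= count)); // true
    System.out.println("restored rejects: " + (block < 0 || block >  count)); // false
  }
}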
@@ -932,15 +935,16 @@ public class HFile {
       }
       ByteBuffer buf = decompress(blockIndex.blockOffsets[block],
         longToInt(onDiskBlockSize), this.blockIndex.blockDataSizes[block]);
+      if (buf == null) {
+        throw new IOException("Decompress block failure " + block);
+      }
 
       byte [] magic = new byte[DATABLOCKMAGIC.length];
       buf.get(magic, 0, magic.length);
       if (!Arrays.equals(magic, DATABLOCKMAGIC)) {
         throw new IOException("Data magic is bad in block " + block);
       }
       // Toss the header. May have to remove later due to performance.
       buf.compact();
       buf.limit(buf.limit() - DATABLOCKMAGIC.length);
       buf.rewind();
 
       // Cache the block
       if(cacheBlock && cache != null) {
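Note the asymmetry the revert restores: the meta path returns null when decompress() yields nothing, while the data path treats the same failure as a hard error. A hedged sketch of the two policies (decompress here is a stand-in, not HFile's private method):

import java.io.IOException;
import java.nio.ByteBuffer;

public class DecompressGuards {
  // Stand-in for HFile.Reader's private decompress, assumed able to
  // return null on failure in this vintage of the code.
  static ByteBuffer decompress(boolean ok) {
    return ok ? ByteBuffer.allocate(16) : null;
  }

  public static void main(String[] args) {
    ByteBuffer meta = decompress(false);
    if (meta == null) {
      System.out.println("meta path: quietly returns null");
    }
    try {
      ByteBuffer data = decompress(false);
      if (data == null) {
        throw new IOException("Decompress block failure");
      }
    } catch (IOException e) {
      System.out.println("data path: " + e.getMessage());
    }
  }
}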
@@ -1245,10 +1249,8 @@ public class HFile {
       }
       if (block != null && currBlock == 0) {
         block.rewind();
-        block.position(DATABLOCKMAGIC.length);
         currKeyLen = block.getInt();
         currValueLen = block.getInt();
         return true;
       }
       currBlock = 0;
       block = reader.readBlock(currBlock, cacheBlocks);
@@ -1271,7 +1273,6 @@ public class HFile {
         } else {
           // we are already in the same block, just rewind to seek again.
           block.rewind();
-          block.position(DATABLOCKMAGIC.length);
         }
       }
     }
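These two scanner hunks fall out of the header handling above: the restored reader strips DATABLOCKMAGIC inside readBlock(), so a cached block buffer begins directly at the first key length and rewind() alone suffices; the reversed patch had kept the header in the buffer and therefore had to re-position() past it after every rewind. A sketch of reading a record from a stripped block:

import java.nio.ByteBuffer;

public class BlockRecordDemo {
  public static void main(String[] args) {
    // A block body after the magic has been tossed: records framed as
    // <int keylen><int vallen><key bytes><value bytes> (inferred layout).
    byte[] key = "row".getBytes(), val = "v1".getBytes();
    ByteBuffer block = ByteBuffer.allocate(8 + key.length + val.length);
    block.putInt(key.length).putInt(val.length).put(key).put(val);
    block.flip();

    block.rewind();                   // restored code: rewind is enough
    int currKeyLen = block.getInt();  // no DATABLOCKMAGIC to skip over
    int currValueLen = block.getInt();
    System.out.println(currKeyLen + " / " + currValueLen); // 3 / 2
  }
}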
@@ -1365,7 +1366,7 @@ public class HFile {
     }
 
     /*
-     * The block index for a HFile.
+     * The block index for a RFile.
      * Used reading.
      */
     static class BlockIndex implements HeapSize {