HBASE-1818 HFile code review and refinement -- reversing patch... doesn't pass tests

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@813158 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-09-09 23:00:13 +00:00
parent e78ff42412
commit 0593245163
2 changed files with 20 additions and 20 deletions

CHANGES.txt

@@ -34,7 +34,6 @@ Release 0.21.0 - Unreleased
    HBASE-1800 Too many ZK connections
    HBASE-1819 Update to 0.20.1 hadoop and zk 3.2.1
    HBASE-1820 Update jruby from 1.2 to 1.3.1
-   HBASE-1818 HFile code review and refinement (Shubert Zhang via Stack)
   OPTIMIZATIONS

HFile.java

@@ -168,7 +168,7 @@ public class HFile {
     protected String name;
     // Total uncompressed bytes, maybe calculate a compression ratio later.
-    private long totalBytes = 0;
+    private int totalBytes = 0;
     // Total # of key/value entries, ie: how many times add() was called.
     private int entryCount = 0;
@@ -320,12 +320,13 @@ public class HFile {
      */
     private void finishBlock() throws IOException {
       if (this.out == null) return;
-      int size = releaseCompressingStream(this.out);
+      long size = releaseCompressingStream(this.out);
       this.out = null;
       blockKeys.add(firstKey);
+      int written = longToInt(size);
       blockOffsets.add(Long.valueOf(blockBegin));
-      blockDataSizes.add(Integer.valueOf(size));
-      this.totalBytes += size;
+      blockDataSizes.add(Integer.valueOf(written));
+      this.totalBytes += written;
     }

     /*
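The restored finishBlock() takes the compressed block size back as a long and narrows it through longToInt() before recording it in the per-block index and the running total. A minimal standalone sketch of that narrowing step; the helper below is illustrative only and is not the actual HFile.longToInt():

// Illustrative sketch: narrowing a long byte count to int before it is
// recorded in an int-based structure, failing loudly if it cannot fit.
public final class SizeNarrowing {
  static int longToInt(final long l) {
    if (l > Integer.MAX_VALUE || l < Integer.MIN_VALUE) {
      throw new IllegalArgumentException("Size out of int range: " + l);
    }
    return (int) l;
  }

  public static void main(String[] args) {
    long compressedSize = 65_536L;           // e.g. size reported by the compressing stream
    int written = longToInt(compressedSize); // safe to store in an int-based block index
    System.out.println("recorded block size = " + written);
  }
}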
@@ -334,10 +335,10 @@ public class HFile {
      */
     private void newBlock() throws IOException {
       // This is where the next block begins.
-      this.blockBegin = outputStream.getPos();
+      blockBegin = outputStream.getPos();
       this.out = getCompressingStream();
       this.out.write(DATABLOCKMAGIC);
-      this.firstKey = null;
+      firstKey = null;
     }

     /*
@@ -512,7 +513,7 @@ public class HFile {
       }
       if (this.lastKeyBuffer != null) {
         if (this.comparator.compare(this.lastKeyBuffer, this.lastKeyOffset,
-            this.lastKeyLength, key, offset, length) >= 0) {
+            this.lastKeyLength, key, offset, length) > 0) {
           throw new IOException("Added a key not lexically larger than" +
             " previous key=" + Bytes.toString(key, offset, length) +
             ", lastkey=" + Bytes.toString(this.lastKeyBuffer, this.lastKeyOffset,
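With the check reverted to "> 0", appending a key equal to the previous one is accepted again; only keys strictly smaller than the last key are rejected, whereas the ">= 0" form being backed out also rejected duplicates. A small self-contained illustration, with a plain lexicographic comparison standing in for the HFile comparator:

// Minimal sketch (not HBase code): how '> 0' vs '>= 0' changes whether a
// duplicate key is accepted when keys must be appended in order.
public class KeyOrderCheck {
  // Unsigned lexicographic comparison, standing in for the HFile comparator.
  static int compare(byte[] a, byte[] b) {
    int len = Math.min(a.length, b.length);
    for (int i = 0; i < len; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }

  public static void main(String[] args) {
    byte[] last = "row-10".getBytes();
    byte[] next = "row-10".getBytes();   // duplicate of the previous key
    // With '> 0' (the restored check) a duplicate passes; with '>= 0' it is rejected.
    System.out.println("rejected by > 0  : " + (compare(last, next) > 0));   // false
    System.out.println("rejected by >= 0 : " + (compare(last, next) >= 0));  // true
  }
}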
@@ -619,7 +620,7 @@ public class HFile {
       appendFileInfo(this.fileinfo, FileInfo.AVG_KEY_LEN,
         Bytes.toBytes(avgKeyLen), false);
       int avgValueLen = this.entryCount == 0? 0:
-        (int)(this.valuelength/this.entryCount);
+        (int)(this.keylength/this.entryCount);
       appendFileInfo(this.fileinfo, FileInfo.AVG_VALUE_LEN,
         Bytes.toBytes(avgValueLen), false);
       appendFileInfo(this.fileinfo, FileInfo.COMPARATOR,
@@ -859,7 +860,7 @@ public class HFile {
       if (trailer.metaIndexCount == 0) {
         return null; // there are no meta blocks
       }
-      if ((metaIndex == null) || (metaIndex.count == 0)) {
+      if (metaIndex == null) {
         throw new IOException("Meta index not loaded");
       }
       byte [] mbname = Bytes.toBytes(metaBlockName);
@@ -875,14 +876,16 @@ public class HFile {
       ByteBuffer buf = decompress(metaIndex.blockOffsets[block],
         longToInt(blockSize), metaIndex.blockDataSizes[block]);
-      if (buf == null)
-        return null;
       byte [] magic = new byte[METABLOCKMAGIC.length];
       buf.get(magic, 0, magic.length);
       if (! Arrays.equals(magic, METABLOCKMAGIC)) {
         throw new IOException("Meta magic is bad in block " + block);
       }
+      // Toss the header. May have to remove later due to performance.
+      buf.compact();
+      buf.limit(buf.limit() - METABLOCKMAGIC.length);
+      buf.rewind();
       return buf;
     }

     /**
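The reinstated "toss the header" sequence rests on ByteBuffer semantics: once the magic has been read, compact() copies the remaining payload to the front of the buffer, the limit is then pulled back by the magic's length to drop the stale tail left by the copy, and rewind() resets the position so the caller sees only the payload. A standalone sketch of the pattern; the magic value below is made up for illustration:

import java.nio.ByteBuffer;
import java.util.Arrays;

// Sketch of stripping a magic header from a block buffer with
// compact() + limit() + rewind(), leaving only the payload visible.
public class TossHeaderDemo {
  static final byte[] MAGIC = "METABLK\0".getBytes();  // assumed 8-byte magic, illustrative

  public static void main(String[] args) {
    byte[] payload = "hello meta block".getBytes();
    ByteBuffer buf = ByteBuffer.allocate(MAGIC.length + payload.length);
    buf.put(MAGIC).put(payload).flip();

    byte[] magic = new byte[MAGIC.length];
    buf.get(magic);                              // position now sits just past the header
    if (!Arrays.equals(magic, MAGIC)) {
      throw new IllegalStateException("bad magic");
    }
    buf.compact();                               // payload copied to index 0, position = payload length
    buf.limit(buf.limit() - MAGIC.length);       // drop the stale tail left over from the copy
    buf.rewind();                                // buffer now spans [0, payload.length)
    System.out.println(new String(buf.array(), 0, buf.limit()));  // prints the payload
  }
}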
@@ -895,7 +898,7 @@ public class HFile {
       if (blockIndex == null) {
         throw new IOException("Block index not loaded");
       }
-      if (block < 0 || block >= blockIndex.count) {
+      if (block < 0 || block > blockIndex.count) {
         throw new IOException("Requested block is out of range: " + block +
           ", max: " + blockIndex.count);
       }
@@ -932,15 +935,16 @@ public class HFile {
       }
       ByteBuffer buf = decompress(blockIndex.blockOffsets[block],
         longToInt(onDiskBlockSize), this.blockIndex.blockDataSizes[block]);
-      if (buf == null) {
-        throw new IOException("Decompress block failure " + block);
-      }
       byte [] magic = new byte[DATABLOCKMAGIC.length];
       buf.get(magic, 0, magic.length);
       if (!Arrays.equals(magic, DATABLOCKMAGIC)) {
         throw new IOException("Data magic is bad in block " + block);
       }
+      // Toss the header. May have to remove later due to performance.
+      buf.compact();
+      buf.limit(buf.limit() - DATABLOCKMAGIC.length);
+      buf.rewind();

       // Cache the block
       if(cacheBlock && cache != null) {
@@ -1245,10 +1249,8 @@ public class HFile {
        }
        if (block != null && currBlock == 0) {
          block.rewind();
-          block.position(DATABLOCKMAGIC.length);
          currKeyLen = block.getInt();
          currValueLen = block.getInt();
-          return true;
        }
        currBlock = 0;
        block = reader.readBlock(currBlock, cacheBlocks);
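Because the data block buffer now has its DATABLOCKMAGIC header compacted away at read time, the scanner only needs block.rewind() before pulling the first key/value lengths; the explicit block.position(DATABLOCKMAGIC.length) skip removed here is no longer required. A minimal sketch of reading the first entry from such a header-less block; the [keyLen][valLen][key][value] layout is assumed for illustration:

import java.nio.ByteBuffer;

// Sketch: once the header has been stripped, rewind() positions the buffer
// directly on the first key length, so no magic-length skip is needed.
public class FirstEntryRead {
  public static void main(String[] args) {
    byte[] key = "row-1".getBytes();
    byte[] val = "value-1".getBytes();
    ByteBuffer block = ByteBuffer.allocate(8 + key.length + val.length);
    block.putInt(key.length).putInt(val.length).put(key).put(val).flip();

    block.rewind();                       // start of the (header-less) block
    int currKeyLen = block.getInt();      // 5
    int currValueLen = block.getInt();    // 7
    System.out.println("keyLen=" + currKeyLen + " valueLen=" + currValueLen);
  }
}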
@@ -1271,7 +1273,6 @@ public class HFile {
        } else {
          // we are already in the same block, just rewind to seek again.
          block.rewind();
-          block.position(DATABLOCKMAGIC.length);
        }
      }
    }
@@ -1365,7 +1366,7 @@ public class HFile {
   }

   /*
-   * The block index for a HFile.
+   * The block index for a RFile.
    * Used reading.
    */
   static class BlockIndex implements HeapSize {