HBASE-3514 Speedup HFile.Writer append
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1074001 13f79535-47bb-0310-9956-ffa450edef68
commit 501afa4ffb
parent 8811bed468
CHANGES.txt

@@ -51,6 +51,7 @@ Release 0.91.0 - Unreleased
               unflushable regions.
   HBASE-3550  FilterList reports false positives (Bill Graham via Andrew
               Purtell)
+  HBASE-3514  Speedup HFile.Writer append

 IMPROVEMENTS
   HBASE-3290  Max Compaction Size (Nicolas Spiegelberg via Stack)
HFile.java

@@ -221,9 +221,6 @@ public class HFile {
     // Used to ensure we write in order.
     private final RawComparator<byte []> comparator;

-    // A stream made per block written.
-    private DataOutputStream out;
-
     // Number of uncompressed bytes per block. Reinitialized when we start
     // new block.
     private int blocksize;
@@ -264,9 +261,9 @@ public class HFile {
     // Block cache to optionally fill on write
     private BlockCache blockCache;

-    // Additional byte array output stream used to fill block cache
-    private ByteArrayOutputStream baos;
-    private DataOutputStream baosDos;
+    // Byte array output stream made per block written.
+    private ByteArrayOutputStream baos = null;
+    private DataOutputStream baosDos = null;
     private int blockNumber = 0;

     /**
@@ -360,7 +357,7 @@ public class HFile {
      * @throws IOException
      */
     private void checkBlockBoundary() throws IOException {
-      if (this.out != null && this.out.size() < blocksize) return;
+      if (baosDos != null && baosDos.size() < blocksize) return;
       finishBlock();
       newBlock();
     }
@@ -370,11 +367,18 @@ public class HFile {
      * @throws IOException
      */
     private void finishBlock() throws IOException {
-      if (this.out == null) return;
+      if (baosDos == null) return;
+
+      // Flush Data Output Stream
+      baosDos.flush();
+
+      // Compress Data and write to output stream
+      DataOutputStream compressStream = getCompressingStream();
+      baos.writeTo(compressStream);
+      int size = releaseCompressingStream(compressStream);
+
       long now = System.currentTimeMillis();

-      int size = releaseCompressingStream(this.out);
-      this.out = null;
       blockKeys.add(firstKey);
       blockOffsets.add(Long.valueOf(blockBegin));
       blockDataSizes.add(Integer.valueOf(size));
@@ -384,14 +388,17 @@ public class HFile {
       writeOps++;

       if (blockCache != null) {
-        baosDos.flush();
         byte [] bytes = baos.toByteArray();
         ByteBuffer blockToCache = ByteBuffer.wrap(bytes, DATABLOCKMAGIC.length,
           bytes.length - DATABLOCKMAGIC.length);
         String blockName = path.toString() + blockNumber;
         blockCache.cacheBlock(blockName, blockToCache);
-        baosDos.close();
       }
+
+      baosDos.close();
+      baosDos = null;
+      baos = null;
+
       blockNumber++;
     }

@@ -402,14 +409,14 @@ public class HFile {
     private void newBlock() throws IOException {
       // This is where the next block begins.
       blockBegin = outputStream.getPos();
-      this.out = getCompressingStream();
-      this.out.write(DATABLOCKMAGIC);
+
       firstKey = null;
-      if (blockCache != null) {
-        this.baos = new ByteArrayOutputStream();
-        this.baosDos = new DataOutputStream(baos);
-        this.baosDos.write(DATABLOCKMAGIC);
-      }
+
+      // to avoid too many calls to realloc(),
+      // pre-allocates the byte stream to the block size + 25%
+      baos = new ByteArrayOutputStream(blocksize + (int)(blocksize * 0.25));
+      baosDos = new DataOutputStream(baos);
+      baosDos.write(DATABLOCKMAGIC);
     }

     /*
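Taken together, the checkBlockBoundary()/finishBlock()/newBlock() hunks above are the heart of the speedup: key/values are now staged uncompressed in the per-block baos/baosDos pair and pushed through the compressor exactly once when the block is finished, instead of routing every append through the compressing stream. Below is a minimal standalone sketch of that pattern, not the actual HFile.Writer code: the class and method shapes are invented for illustration, and java.util.zip's DeflaterOutputStream stands in for the pluggable Hadoop codec behind getCompressingStream()/releaseCompressingStream().

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.DeflaterOutputStream;

/**
 * Illustrative sketch only: stage each data block uncompressed in memory and
 * compress it in a single pass when the block is finished.
 */
public final class BlockWriteSketch {
  private static final byte[] DATABLOCKMAGIC = "DATABLK*".getBytes();

  private final OutputStream fileOut;   // where finished, compressed blocks land
  private final int blocksize;
  private ByteArrayOutputStream baos;
  private DataOutputStream baosDos;

  public BlockWriteSketch(OutputStream fileOut, int blocksize) throws IOException {
    this.fileOut = fileOut;
    this.blocksize = blocksize;
    newBlock();
  }

  /** Buffer one key/value pair; no compression work happens on this path. */
  public void append(byte[] key, byte[] value) throws IOException {
    if (baosDos.size() >= blocksize) {   // same idea as checkBlockBoundary()
      finishBlock();
      newBlock();
    }
    baosDos.writeInt(key.length);
    baosDos.writeInt(value.length);
    baosDos.write(key);
    baosDos.write(value);
  }

  /** Compress the whole buffered block in one pass and write it out. */
  public void finishBlock() throws IOException {
    baosDos.flush();
    // Stand-in for getCompressingStream()/releaseCompressingStream() in the real writer.
    DeflaterOutputStream compressStream = new DeflaterOutputStream(fileOut);
    baos.writeTo(compressStream);
    compressStream.finish();             // flush compressed bytes, keep fileOut open
    baosDos.close();
    baosDos = null;
    baos = null;
  }

  /** Start a new block; pre-allocate the buffer to blocksize + 25% to avoid realloc. */
  private void newBlock() throws IOException {
    baos = new ByteArrayOutputStream(blocksize + (int) (blocksize * 0.25));
    baosDos = new DataOutputStream(baos);
    baosDos.write(DATABLOCKMAGIC);
  }
}

With this shape, compression cost is paid once per finished block rather than once per appended key/value.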
@@ -467,7 +474,7 @@ public class HFile {
       for (i = 0; i < metaNames.size(); ++i) {
         // stop when the current key is greater than our own
         byte[] cur = metaNames.get(i);
-        if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length,
+        if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length,
             key, 0, key.length) > 0) {
           break;
         }
@@ -563,12 +570,12 @@ public class HFile {
         checkBlockBoundary();
       }
       // Write length of key and value and then actual key and value bytes.
-      this.out.writeInt(klength);
+      this.baosDos.writeInt(klength);
       this.keylength += klength;
-      this.out.writeInt(vlength);
+      this.baosDos.writeInt(vlength);
       this.valuelength += vlength;
-      this.out.write(key, koffset, klength);
-      this.out.write(value, voffset, vlength);
+      this.baosDos.write(key, koffset, klength);
+      this.baosDos.write(value, voffset, vlength);
       // Are we the first key in this block?
       if (this.firstKey == null) {
         // Copy the key.
@@ -579,13 +586,6 @@ public class HFile {
       this.lastKeyOffset = koffset;
       this.lastKeyLength = klength;
       this.entryCount ++;
-      // If we are pre-caching blocks on write, fill byte array stream
-      if (blockCache != null) {
-        this.baosDos.writeInt(klength);
-        this.baosDos.writeInt(vlength);
-        this.baosDos.write(key, koffset, klength);
-        this.baosDos.write(value, voffset, vlength);
-      }
     }

     /*
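Two smaller design points are visible in the diff: newBlock() now pre-allocates the byte array stream to the block size plus 25% so that appends rarely force the backing array to grow, and the same buffer doubles as the source for cache-on-write, so when the block cache is enabled each key/value is no longer written twice (once to the compressing stream and once to the cache copy) as it was in the old append() path.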