HBASE-3040 BlockIndex readIndex too slowly in heavy write scenario

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1001949 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2010-09-27 22:53:26 +00:00
parent 4f545ea09c
commit b11ac8d6f5
2 changed files with 57 additions and 4 deletions
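
What the patch does, in short: before this change, BlockIndex.readIndex parsed the block index entry by entry straight off the FSDataInputStream, so building the index for a file with a large index (as produced under heavy write load) went through the filesystem stream for every entry. The patch instead reads the whole index region, data index plus meta index, which sits between dataIndexOffset and the fixed trailer, into a byte array with one seek and one readFully, then parses it from an in-memory stream. Below is a minimal sketch of that pattern, not the patch itself: BulkIndexReadSketch, bufferRegion and parseEntries are illustrative names, and RandomAccessFile stands in for FSDataInputStream.

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    /** Sketch only: buffer the whole index region once, then parse it in memory. */
    class BulkIndexReadSketch {

      /** One seek plus one bulk read, as readAllIndex does in the patch. */
      static DataInputStream bufferRegion(RandomAccessFile in, long offset, int size)
          throws IOException {
        byte[] region = new byte[size];
        in.seek(offset);
        in.readFully(region);
        return new DataInputStream(new ByteArrayInputStream(region));
      }

      /** Per-entry reads now hit the in-memory buffer rather than the filesystem. */
      static void parseEntries(DataInputStream dis, int entryCount) throws IOException {
        for (int i = 0; i < entryCount; i++) {
          long blockOffset = dis.readLong();  // entry layout as in BlockIndex.readIndexEx
          int blockDataSize = dis.readInt();
          // a length-prefixed key would be read here, via Bytes.readByteArray in HBase
        }
      }
    }
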


@@ -943,6 +943,8 @@ Release 0.21.0 - Unreleased
    HBASE-3022  Change format of enum messages in o.a.h.h.executor package
    HBASE-3001  Ship dependency jars to the cluster for all jobs
    HBASE-3033  [replication] ReplicationSink.replicateEntries improvements
+   HBASE-3040  BlockIndex readIndex too slowly in heavy write scenario
+               (Andy Chen via Stack)
 
   NEW FEATURES
    HBASE-1961  HBase EC2 scripts


@@ -23,6 +23,7 @@ import java.io.BufferedInputStream;
 import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -794,6 +795,14 @@ public class HFile {
       return this.inMemory;
     }
 
+    private byte[] readAllIndex(final FSDataInputStream in, final long indexOffset,
+        final int indexSize) throws IOException {
+      byte[] allIndex = new byte[indexSize];
+      in.seek(indexOffset);
+      IOUtils.readFully(in, allIndex, 0, allIndex.length);
+      return allIndex;
+    }
+
     /**
      * Read in the index and file info.
      * @return A map of fileinfo data.
@@ -814,16 +823,26 @@
       String clazzName = Bytes.toString(fi.get(FileInfo.COMPARATOR));
       this.comparator = getComparator(clazzName);
+      int allIndexSize = (int)(this.fileSize - this.trailer.dataIndexOffset - FixedFileTrailer.trailerSize());
+      byte[] dataAndMetaIndex = readAllIndex(this.istream, this.trailer.dataIndexOffset, allIndexSize);
+      ByteArrayInputStream bis = new ByteArrayInputStream(dataAndMetaIndex);
+      DataInputStream dis = new DataInputStream(bis);
       // Read in the data index.
-      this.blockIndex = BlockIndex.readIndex(this.comparator, this.istream,
-        this.trailer.dataIndexOffset, this.trailer.dataIndexCount);
+      this.blockIndex = BlockIndex.readIndexEx(this.comparator, dis, this.trailer.dataIndexCount);
       // Read in the metadata index.
       if (trailer.metaIndexCount > 0) {
-        this.metaIndex = BlockIndex.readIndex(Bytes.BYTES_RAWCOMPARATOR,
-          this.istream, this.trailer.metaIndexOffset, trailer.metaIndexCount);
+        this.metaIndex = BlockIndex.readIndexEx(Bytes.BYTES_RAWCOMPARATOR, dis,
+          this.trailer.metaIndexCount);
       }
       this.fileInfoLoaded = true;
+      if (null != dis) {
+        dis.close();
+      }
       return fi;
     }
@@ -1681,6 +1700,38 @@ public class HFile {
       return bi;
     }
 
+    /*
+     * Read in the index from the given in-memory stream.
+     * Must match what was written by writeIndex in the Writer.close.
+     * @param in
+     * @param indexSize
+     * @throws IOException
+     */
+    static BlockIndex readIndexEx(final RawComparator<byte []> c, DataInputStream in,
+        final int indexSize)
+    throws IOException {
+      BlockIndex bi = new BlockIndex(c);
+      bi.blockOffsets = new long[indexSize];
+      bi.blockKeys = new byte[indexSize][];
+      bi.blockDataSizes = new int[indexSize];
+      // If index size is zero, no index was written.
+      if (indexSize > 0) {
+        byte [] magic = new byte[INDEXBLOCKMAGIC.length];
+        in.readFully(magic);
+        if (!Arrays.equals(magic, INDEXBLOCKMAGIC)) {
+          throw new IOException("Index block magic is wrong: " +
+            Arrays.toString(magic));
+        }
+        for (int i = 0; i < indexSize; ++i) {
+          long offset = in.readLong();
+          int dataSize = in.readInt();
+          byte [] key = Bytes.readByteArray(in);
+          bi.add(key, offset, dataSize);
+        }
+      }
+      return bi;
+    }
+
     @Override
     public String toString() {
       StringBuilder sb = new StringBuilder();
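
The comment on readIndexEx above notes that it must match what writeIndex produces. From the reads in that method, the layout is: the INDEXBLOCKMAGIC marker, then per entry a long block offset, an int block data size, and a length-prefixed key. Below is a minimal writer-side sketch of that layout, assuming Bytes.writeByteArray is the vint-length-prefixed counterpart of the Bytes.readByteArray call used above; IndexBlockLayoutSketch is an illustrative name and MAGIC is a placeholder, not the real INDEXBLOCKMAGIC value.

    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableUtils;

    /** Sketch of the index-block layout readIndexEx consumes; not the HBase writer itself. */
    class IndexBlockLayoutSketch {
      // Placeholder marker; the real value lives in HFile as INDEXBLOCKMAGIC.
      static final byte[] MAGIC = "IDXMAGIC".getBytes();

      static void writeIndex(DataOutputStream out, byte[][] keys,
          long[] offsets, int[] sizes) throws IOException {
        out.write(MAGIC);                              // checked by readIndexEx via Arrays.equals
        for (int i = 0; i < keys.length; i++) {
          out.writeLong(offsets[i]);                   // read back with in.readLong()
          out.writeInt(sizes[i]);                      // read back with in.readInt()
          WritableUtils.writeVInt(out, keys[i].length); // vint length prefix ...
          out.write(keys[i]);                          // ... then the key bytes (Bytes.readByteArray)
        }
      }
    }
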