HBASE-17020 keylen in midkey() is not computed correctly
Signed-off-by: Yu Li <liyu@apache.org>
parent 7f08cd0e10
commit fb4c50966e
@@ -426,7 +426,8 @@ public class HFileBlockIndex {
         ByteBuff b = midLeafBlock.getBufferWithoutHeader();
         int numDataBlocks = b.getIntAfterPosition(0);
         int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 1));
-        int keyLen = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset;
+        int keyLen = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset
+            - SECONDARY_INDEX_ENTRY_OVERHEAD;
         int keyOffset =
             Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset
                 + SECONDARY_INDEX_ENTRY_OVERHEAD;
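Why the extra subtraction is needed: in a leaf-level index block, each entry stores the data block's offset (a long) and its on-disk size (an int) ahead of the key bytes, and the secondary index stores the relative start offset of each full entry. The difference between two consecutive relative offsets is therefore the key length plus that per-entry overhead, so the old keyLen came out too large by the overhead and could run past the block buffer when the mid key is the last entry of a leaf block. Below is a minimal standalone sketch of the arithmetic (illustrative only, not code from the patch; it assumes SECONDARY_INDEX_ENTRY_OVERHEAD = SIZEOF_LONG + SIZEOF_INT = 12, as defined in HFileBlockIndex):

    // Illustrative sketch of the keyLen arithmetic; values are made up for the demo.
    // Assumed leaf-index entry layout: [long blockOffset][int onDiskSize][key bytes].
    public class MidKeyLenSketch {
      static final int SECONDARY_INDEX_ENTRY_OVERHEAD = 8 + 4; // long + int

      public static void main(String[] args) {
        // Relative start offsets of entry i and entry i+1 inside the entry area,
        // as they would be read from the block's secondary index.
        int keyRelOffset = 0;
        int nextKeyRelOffset = SECONDARY_INDEX_ENTRY_OVERHEAD + 5; // a 5-byte key follows the overhead

        int buggyKeyLen = nextKeyRelOffset - keyRelOffset;              // 17: overhead counted as key bytes
        int fixedKeyLen = buggyKeyLen - SECONDARY_INDEX_ENTRY_OVERHEAD; // 5: the actual key length

        System.out.println("buggy=" + buggyKeyLen + ", fixed=" + fixedKeyLen);
      }
    }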
@@ -101,4 +101,33 @@ public class RandomKeyValueUtil {
     byte[] keyBytes = k.toString().getBytes();
     return keyBytes;
   }
+
+  public static byte[] randomOrderedFixedLengthKey(Random rand, int i, int suffixLength) {
+    StringBuilder k = new StringBuilder();
+
+    // The fixed-length lexicographically increasing part of the key.
+    for (int bitIndex = 31; bitIndex >= 0; --bitIndex) {
+      if ((i & (1 << bitIndex)) == 0)
+        k.append("a");
+      else
+        k.append("b");
+    }
+
+    // A random suffix of the key.
+    for (int j = 0; j < suffixLength; ++j)
+      k.append(randomReadableChar(rand));
+
+    byte[] keyBytes = k.toString().getBytes();
+    return keyBytes;
+  }
+
+  public static byte[] randomFixedLengthValue(Random rand, int valueLength) {
+    StringBuilder v = new StringBuilder();
+    for (int j = 0; j < valueLength; ++j) {
+      v.append((char) (32 + rand.nextInt(95)));
+    }
+
+    byte[] valueBytes = v.toString().getBytes();
+    return valueBytes;
+  }
 }
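The new randomOrderedFixedLengthKey helper encodes the 32 bits of i most-significant-bit first as 'a'/'b' characters, so every key has the same length and keys written with increasing i stay in increasing byte order; the random readable suffix only adds per-key variation. A hypothetical standalone demo of that prefix behaviour (the class and method names below are made up for illustration):

    // Hypothetical demo, not part of the patch: shows how the fixed-length prefix
    // of randomOrderedFixedLengthKey keeps keys in lexicographic order.
    public class OrderedKeyDemo {
      // Same bit-to-letter mapping as the new helper: MSB first, 0 -> 'a', 1 -> 'b'.
      static String fixedPrefix(int i) {
        StringBuilder k = new StringBuilder();
        for (int bitIndex = 31; bitIndex >= 0; --bitIndex) {
          k.append((i & (1 << bitIndex)) == 0 ? 'a' : 'b');
        }
        return k.toString();
      }

      public static void main(String[] args) {
        // Consecutive values of i yield 32-char prefixes that compare in increasing order,
        // e.g. ...aaaa < ...aaab < ...aaba, so rows written with i = 0..15 stay sorted.
        for (int i = 0; i < 4; ++i) {
          System.out.println(i + " -> " + fixedPrefix(i));
        }
      }
    }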
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexChunk;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
@@ -513,6 +514,73 @@ public class TestHFileBlockIndex {
     }
   }
 
+  /**
+   * to check if looks good when midKey on a leaf index block boundary
+   * @throws IOException
+   */
+  @Test
+  public void testMidKeyOnLeafIndexBlockBoundary() throws IOException {
+    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
+        "hfile_for_midkey");
+    int maxChunkSize = 512;
+    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
+    // should open hfile.block.index.cacheonwrite
+    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
+
+    CacheConfig cacheConf = new CacheConfig(conf);
+    BlockCache blockCache = cacheConf.getBlockCache();
+    // Evict all blocks that were cached-on-write by the previous invocation.
+    blockCache.evictBlocksByHfileName(hfilePath.getName());
+    // Write the HFile
+    {
+      HFileContext meta = new HFileContextBuilder()
+          .withBlockSize(SMALL_BLOCK_SIZE)
+          .withCompression(Algorithm.NONE)
+          .withDataBlockEncoding(DataBlockEncoding.NONE)
+          .build();
+      HFile.Writer writer =
+          HFile.getWriterFactory(conf, cacheConf)
+              .withPath(fs, hfilePath)
+              .withFileContext(meta)
+              .create();
+      Random rand = new Random(19231737);
+      byte[] family = Bytes.toBytes("f");
+      byte[] qualifier = Bytes.toBytes("q");
+      int kvNumberToBeWritten = 16;
+      // the new generated hfile will contain 2 leaf-index blocks and 16 data blocks,
+      // midkey is just on the boundary of the first leaf-index block
+      for (int i = 0; i < kvNumberToBeWritten; ++i) {
+        byte[] row = RandomKeyValueUtil.randomOrderedFixedLengthKey(rand, i, 30);
+
+        // Key will be interpreted by KeyValue.KEY_COMPARATOR
+        KeyValue kv =
+            new KeyValue(row, family, qualifier, EnvironmentEdgeManager.currentTime(),
+                RandomKeyValueUtil.randomFixedLengthValue(rand, SMALL_BLOCK_SIZE));
+        writer.append(kv);
+      }
+      writer.close();
+    }
+
+    // close hfile.block.index.cacheonwrite
+    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
+
+    // Read the HFile
+    HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, conf);
+
+    boolean hasArrayIndexOutOfBoundsException = false;
+    try {
+      // get the mid-key.
+      reader.midkey();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      hasArrayIndexOutOfBoundsException = true;
+    } finally {
+      reader.close();
+    }
+
+    // to check if ArrayIndexOutOfBoundsException occured
+    assertFalse(hasArrayIndexOutOfBoundsException);
+  }
+
   /**
    * Testing block index through the HFile writer/reader APIs. Allows to test
    * setting index block size through configuration, intermediate-level index