HBASE-15060 Cull TestHFileV2 and HFileWriterFactory

parent c1b6d47e79
commit 92abf8ac57
@@ -241,7 +241,7 @@ public class HFile {
    * This variety of ways to construct writers is used throughout the code, and
    * we want to be able to swap writer implementations.
    */
-  public static abstract class WriterFactory {
+  public static class WriterFactory {
     protected final Configuration conf;
     protected final CacheConfig cacheConf;
     protected FileSystem fs;
@@ -309,12 +309,8 @@ public class HFile {
         else if (LOG.isDebugEnabled()) LOG.debug("Unable to set drop behind on " + path);
         }
       }
-      return createWriter(fs, path, ostream,
-          comparator, fileContext);
+      return new HFileWriterImpl(conf, cacheConf, path, ostream, comparator, fileContext);
     }
-
-    protected abstract Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream,
-        CellComparator comparator, HFileContext fileContext) throws IOException;
   }

   /** The configuration key for HFile version to use for new files */
@@ -351,7 +347,7 @@ public class HFile {
           " hfiles only (but it can read v2 files without having to update hfile.format.version " +
           "in hbase-site.xml)");
       case 3:
-        return new HFileWriterFactory(conf, cacheConf);
+        return new HFile.WriterFactory(conf, cacheConf);
       default:
         throw new IllegalArgumentException("Cannot create writer for HFile " +
             "format version " + version);
@@ -1,40 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CellComparator;
-
-public class HFileWriterFactory extends HFile.WriterFactory {
-  HFileWriterFactory(Configuration conf, CacheConfig cacheConf) {
-    super(conf, cacheConf);
-  }
-
-  @Override
-  public HFile.Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream,
-      CellComparator comparator, HFileContext context)
-      throws IOException {
-    return new HFileWriterImpl(conf, cacheConf, path, ostream, comparator, context);
-  }
-}
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.hbase.KeyValue;
+
+import java.util.Random;
+
+/**
+ * These helper methods generate random byte[]'s data for KeyValues
+ */
+public class RandomKeyValueUtil {
+  public static final String COLUMN_FAMILY_NAME = "_-myColumnFamily-_";
+  private static final int MIN_ROW_OR_QUALIFIER_LENGTH = 64;
+  private static final int MAX_ROW_OR_QUALIFIER_LENGTH = 128;
+
+  public static final char randomReadableChar(Random rand) {
+    int i = rand.nextInt(26 * 2 + 10 + 1);
+    if (i < 26)
+      return (char) ('A' + i);
+    i -= 26;
+
+    if (i < 26)
+      return (char) ('a' + i);
+    i -= 26;
+
+    if (i < 10)
+      return (char) ('0' + i);
+    i -= 10;
+
+    assert i == 0;
+    return '_';
+  }
+
+  public static KeyValue randomKeyValue(Random rand) {
+    return new KeyValue(randomRowOrQualifier(rand),
+        COLUMN_FAMILY_NAME.getBytes(), randomRowOrQualifier(rand),
+        randomValue(rand));
+  }
+
+  public static byte[] randomRowOrQualifier(Random rand) {
+    StringBuilder field = new StringBuilder();
+    int fieldLen = MIN_ROW_OR_QUALIFIER_LENGTH
+        + rand.nextInt(MAX_ROW_OR_QUALIFIER_LENGTH
+            - MIN_ROW_OR_QUALIFIER_LENGTH + 1);
+    for (int i = 0; i < fieldLen; ++i)
+      field.append(randomReadableChar(rand));
+    return field.toString().getBytes();
+  }
+
+  public static byte[] randomValue(Random rand) {
+    StringBuilder v = new StringBuilder();
+    for (int j = 0; j < 1 + rand.nextInt(2000); ++j) {
+      v.append((char) (32 + rand.nextInt(95)));
+    }
+
+    byte[] valueBytes = v.toString().getBytes();
+    return valueBytes;
+  }
+
+  /**
+   * Generates a random key that is guaranteed to increase as the given index i
+   * increases. The result consists of a prefix, which is a deterministic
+   * increasing function of i, and a random suffix.
+   *
+   * @param rand
+   *          random number generator to use
+   * @param i
+   * @return
+   */
+  public static byte[] randomOrderedKey(Random rand, int i) {
+    StringBuilder k = new StringBuilder();
+
+    // The fixed-length lexicographically increasing part of the key.
+    for (int bitIndex = 31; bitIndex >= 0; --bitIndex) {
+      if ((i & (1 << bitIndex)) == 0)
+        k.append("a");
+      else
+        k.append("b");
+    }
+
+    // A random-length random suffix of the key.
+    for (int j = 0; j < rand.nextInt(50); ++j)
+      k.append(randomReadableChar(rand));
+
+    byte[] keyBytes = k.toString().getBytes();
+    return keyBytes;
+  }
+}
@@ -380,9 +380,9 @@ public class TestCacheOnWrite {
         .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build();
     byte[] cf = Bytes.toBytes("fam");
     for (int i = 0; i < NUM_KV; ++i) {
-      byte[] row = TestHFileWriterV2.randomOrderedKey(rand, i);
-      byte[] qualifier = TestHFileWriterV2.randomRowOrQualifier(rand);
-      byte[] value = TestHFileWriterV2.randomValue(rand);
+      byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i);
+      byte[] qualifier = RandomKeyValueUtil.randomRowOrQualifier(rand);
+      byte[] value = RandomKeyValueUtil.randomValue(rand);
       KeyValue kv;
       if(useTags) {
         Tag t = new Tag((byte) 1, "visibility");
@@ -280,7 +280,7 @@ public class TestHFileBlockIndex {
     byte[] qualifier = Bytes.toBytes("q");
     for (int j = 0; j < 16; ++j) {
       byte[] k =
-          new KeyValue(TestHFileWriterV2.randomOrderedKey(rand, i * 16 + j), family, qualifier,
+          new KeyValue(RandomKeyValueUtil.randomOrderedKey(rand, i * 16 + j), family, qualifier,
               EnvironmentEdgeManager.currentTime(), KeyValue.Type.Put).getKey();
       keys.add(k);
       if (j == 8) {
@@ -348,7 +348,7 @@ public class TestHFileBlockIndex {
     int secondaryIndexEntries[] = new int[numTotalKeys];

     for (int i = 0; i < numTotalKeys; ++i) {
-      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i * 2);
+      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i * 2);
       KeyValue cell = new KeyValue(k, Bytes.toBytes("f"), Bytes.toBytes("q"),
           Bytes.toBytes("val"));
       //KeyValue cell = new KeyValue.KeyOnlyKeyValue(k, 0, k.length);
@@ -473,7 +473,7 @@ public class TestHFileBlockIndex {
       c.writeRoot(dos);
       assertEquals(c.getRootSize(), dos.size());

-      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
+      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
       numSubEntries += rand.nextInt(5) + 1;
       keys.add(k);
       c.add(k, getDummyFileOffset(i), getDummyOnDiskSize(i), numSubEntries);
@@ -556,12 +556,12 @@ public class TestHFileBlockIndex {
     byte[] family = Bytes.toBytes("f");
     byte[] qualifier = Bytes.toBytes("q");
     for (int i = 0; i < NUM_KV; ++i) {
-      byte[] row = TestHFileWriterV2.randomOrderedKey(rand, i);
+      byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i);

       // Key will be interpreted by KeyValue.KEY_COMPARATOR
       KeyValue kv =
           new KeyValue(row, family, qualifier, EnvironmentEdgeManager.currentTime(),
-              TestHFileWriterV2.randomValue(rand));
+              RandomKeyValueUtil.randomValue(rand));
       byte[] k = kv.getKey();
       writer.append(kv);
       keys[i] = k;
@@ -55,7 +55,7 @@ public class TestHFileInlineToRootChunkConversion {
     CacheConfig cacheConf = new CacheConfig(conf);
     conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
     HFileContext context = new HFileContextBuilder().withBlockSize(16).build();
-    HFile.Writer hfw = new HFileWriterFactory(conf, cacheConf)
+    HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf)
             .withFileContext(context)
             .withPath(fs, hfPath).create();
     List<byte[]> keys = new ArrayList<byte[]>();
@@ -1,342 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io.hfile;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.nio.ByteBuff;
-import org.apache.hadoop.hbase.testclassification.IOTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Testing writing a version 2 {@link HFile}. This is a low-level test written
- * during the development of {@link HFileWriterImpl}.
- */
-@Category({IOTests.class, SmallTests.class})
-public class TestHFileWriterV2 {
-
-  private static final Log LOG = LogFactory.getLog(TestHFileWriterV2.class);
-
-  private static final HBaseTestingUtility TEST_UTIL =
-      new HBaseTestingUtility();
-
-  private Configuration conf;
-  private FileSystem fs;
-
-  @Before
-  public void setUp() throws IOException {
-    conf = TEST_UTIL.getConfiguration();
-    fs = FileSystem.get(conf);
-  }
-
-  @Test
-  public void testHFileFormatV2() throws IOException {
-    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testHFileFormatV2");
-    final Compression.Algorithm compressAlgo = Compression.Algorithm.GZ;
-    final int entryCount = 10000;
-    writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, false);
-  }
-
-  @Test
-  public void testMidKeyInHFile() throws IOException{
-    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
-        "testMidKeyInHFile");
-    Compression.Algorithm compressAlgo = Compression.Algorithm.NONE;
-    int entryCount = 50000;
-    writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, true);
-  }
-
-  private void writeDataAndReadFromHFile(Path hfilePath,
-      Algorithm compressAlgo, int entryCount, boolean findMidKey) throws IOException {
-
-    HFileContext context = new HFileContextBuilder()
-        .withBlockSize(4096)
-        .withCompression(compressAlgo)
-        .build();
-    HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf))
-        .withPath(fs, hfilePath)
-        .withFileContext(context)
-        .create();
-
-    Random rand = new Random(9713312); // Just a fixed seed.
-    List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);
-
-    for (int i = 0; i < entryCount; ++i) {
-      byte[] keyBytes = randomOrderedKey(rand, i);
-
-      // A random-length random value.
-      byte[] valueBytes = randomValue(rand);
-      KeyValue keyValue = new KeyValue(keyBytes, null, null, valueBytes);
-      writer.append(keyValue);
-      keyValues.add(keyValue);
-    }
-
-    // Add in an arbitrary order. They will be sorted lexicographically by
-    // the key.
-    writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
-    writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
-    writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));
-
-    writer.close();
-
-
-    FSDataInputStream fsdis = fs.open(hfilePath);
-
-    // A "manual" version of a new-format HFile reader. This unit test was
-    // written before the V2 reader was fully implemented.
-
-    long fileSize = fs.getFileStatus(hfilePath).getLen();
-    FixedFileTrailer trailer =
-        FixedFileTrailer.readFromStream(fsdis, fileSize);
-
-    assertEquals(entryCount, trailer.getEntryCount());
-
-    HFileContext meta = new HFileContextBuilder()
-        .withHBaseCheckSum(true)
-        .withIncludesMvcc(false)
-        .withIncludesTags(false)
-        .withCompression(compressAlgo)
-        .build();
-
-    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, meta);
-    // Comparator class name is stored in the trailer in version 2.
-    CellComparator comparator = trailer.createComparator();
-    HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
-        new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator,
-            trailer.getNumDataIndexLevels());
-    HFileBlockIndex.BlockIndexReader metaBlockIndexReader =
-        new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
-
-    HFileBlock.BlockIterator blockIter = blockReader.blockRange(
-        trailer.getLoadOnOpenDataOffset(),
-        fileSize - trailer.getTrailerSize());
-    // Data index. We also read statistics about the block index written after
-    // the root level.
-    dataBlockIndexReader.readMultiLevelIndexRoot(
-        blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
-        trailer.getDataIndexCount());
-
-    if (findMidKey) {
-      Cell midkey = dataBlockIndexReader.midkey();
-      assertNotNull("Midkey should not be null", midkey);
-    }
-
-    // Meta index.
-    metaBlockIndexReader.readRootIndex(
-        blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX)
-            .getByteStream(), trailer.getMetaIndexCount());
-    // File info
-    FileInfo fileInfo = new FileInfo();
-    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
-    byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION);
-    boolean includeMemstoreTS = keyValueFormatVersion != null &&
-        Bytes.toInt(keyValueFormatVersion) > 0;
-
-    // Counters for the number of key/value pairs and the number of blocks
-    int entriesRead = 0;
-    int blocksRead = 0;
-    long memstoreTS = 0;
-
-    // Scan blocks the way the reader would scan them
-    fsdis.seek(0);
-    long curBlockPos = 0;
-    while (curBlockPos <= trailer.getLastDataBlockOffset()) {
-      HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
-      assertEquals(BlockType.DATA, block.getBlockType());
-      if (meta.isCompressedOrEncrypted()) {
-        assertFalse(block.isUnpacked());
-        block = block.unpack(meta, blockReader);
-      }
-      ByteBuff buf = block.getBufferWithoutHeader();
-      while (buf.hasRemaining()) {
-        int keyLen = buf.getInt();
-        int valueLen = buf.getInt();
-
-        byte[] key = new byte[keyLen];
-        buf.get(key);
-
-        byte[] value = new byte[valueLen];
-        buf.get(value);
-
-        if (includeMemstoreTS) {
-          ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(),
-              buf.arrayOffset() + buf.position(), buf.remaining());
-          DataInputStream data_input = new DataInputStream(byte_input);
-
-          memstoreTS = WritableUtils.readVLong(data_input);
-          buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
-        }
-
-        // A brute-force check to see that all keys and values are correct.
-        KeyValue kv = keyValues.get(entriesRead);
-        assertTrue(Bytes.compareTo(key, kv.getKey()) == 0);
-        assertTrue(Bytes.compareTo(value, 0, value.length, kv.getValueArray(), kv.getValueOffset(),
-            kv.getValueLength()) == 0);
-
-        ++entriesRead;
-      }
-      ++blocksRead;
-      curBlockPos += block.getOnDiskSizeWithHeader();
-    }
-    LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead="
-        + blocksRead);
-    assertEquals(entryCount, entriesRead);
-
-    // Meta blocks. We can scan until the load-on-open data offset (which is
-    // the root block index offset in version 2) because we are not testing
-    // intermediate-level index blocks here.
-
-    int metaCounter = 0;
-    while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
-      LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " +
-          trailer.getLoadOnOpenDataOffset());
-      HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false)
-          .unpack(meta, blockReader);
-      assertEquals(BlockType.META, block.getBlockType());
-      Text t = new Text();
-      ByteBuff buf = block.getBufferWithoutHeader();
-      if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
-        throw new IOException("Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName());
-      }
-      Text expectedText =
-          (metaCounter == 0 ? new Text("Paris") : metaCounter == 1 ? new Text(
-              "Moscow") : new Text("Washington, D.C."));
-      assertEquals(expectedText, t);
-      LOG.info("Read meta block data: " + t);
-      ++metaCounter;
-      curBlockPos += block.getOnDiskSizeWithHeader();
-    }
-
-    fsdis.close();
-  }
-
-
-  // Static stuff used by various HFile v2 unit tests
-
-  public static final String COLUMN_FAMILY_NAME = "_-myColumnFamily-_";
-  private static final int MIN_ROW_OR_QUALIFIER_LENGTH = 64;
-  private static final int MAX_ROW_OR_QUALIFIER_LENGTH = 128;
-
-  /**
-   * Generates a random key that is guaranteed to increase as the given index i
-   * increases. The result consists of a prefix, which is a deterministic
-   * increasing function of i, and a random suffix.
-   *
-   * @param rand
-   *          random number generator to use
-   * @param i
-   * @return
-   */
-  public static byte[] randomOrderedKey(Random rand, int i) {
-    StringBuilder k = new StringBuilder();
-
-    // The fixed-length lexicographically increasing part of the key.
-    for (int bitIndex = 31; bitIndex >= 0; --bitIndex) {
-      if ((i & (1 << bitIndex)) == 0)
-        k.append("a");
-      else
-        k.append("b");
-    }
-
-    // A random-length random suffix of the key.
-    for (int j = 0; j < rand.nextInt(50); ++j)
-      k.append(randomReadableChar(rand));
-
-    byte[] keyBytes = k.toString().getBytes();
-    return keyBytes;
-  }
-
-  public static byte[] randomValue(Random rand) {
-    StringBuilder v = new StringBuilder();
-    for (int j = 0; j < 1 + rand.nextInt(2000); ++j) {
-      v.append((char) (32 + rand.nextInt(95)));
-    }
-
-    byte[] valueBytes = v.toString().getBytes();
-    return valueBytes;
-  }
-
-  public static final char randomReadableChar(Random rand) {
-    int i = rand.nextInt(26 * 2 + 10 + 1);
-    if (i < 26)
-      return (char) ('A' + i);
-    i -= 26;
-
-    if (i < 26)
-      return (char) ('a' + i);
-    i -= 26;
-
-    if (i < 10)
-      return (char) ('0' + i);
-    i -= 10;
-
-    assert i == 0;
-    return '_';
-  }
-
-  public static byte[] randomRowOrQualifier(Random rand) {
-    StringBuilder field = new StringBuilder();
-    int fieldLen = MIN_ROW_OR_QUALIFIER_LENGTH
-        + rand.nextInt(MAX_ROW_OR_QUALIFIER_LENGTH
-            - MIN_ROW_OR_QUALIFIER_LENGTH + 1);
-    for (int i = 0; i < fieldLen; ++i)
-      field.append(randomReadableChar(rand));
-    return field.toString().getBytes();
-  }
-
-  public static KeyValue randomKeyValue(Random rand) {
-    return new KeyValue(randomRowOrQualifier(rand),
-        COLUMN_FAMILY_NAME.getBytes(), randomRowOrQualifier(rand),
-        randomValue(rand));
-  }
-
-
-}
@@ -120,7 +120,7 @@ public class TestHFileWriterV3 {
         .withBlockSize(4096)
         .withIncludesTags(useTags)
         .withCompression(compressAlgo).build();
-    HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf))
+    HFile.Writer writer = new HFile.WriterFactory(conf, new CacheConfig(conf))
         .withPath(fs, hfilePath)
         .withFileContext(context)
         .withComparator(CellComparator.COMPARATOR)
@@ -130,10 +130,10 @@ public class TestHFileWriterV3 {
     List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);

     for (int i = 0; i < entryCount; ++i) {
-      byte[] keyBytes = TestHFileWriterV2.randomOrderedKey(rand, i);
+      byte[] keyBytes = RandomKeyValueUtil.randomOrderedKey(rand, i);

       // A random-length random value.
-      byte[] valueBytes = TestHFileWriterV2.randomValue(rand);
+      byte[] valueBytes = RandomKeyValueUtil.randomValue(rand);
       KeyValue keyValue = null;
       if (useTags) {
         ArrayList<Tag> tags = new ArrayList<Tag>();
@@ -176,7 +176,7 @@ public class TestHFileWriterV3 {
         .withHBaseCheckSum(true).build();
     HFileBlock.FSReader blockReader =
         new HFileBlock.FSReaderImpl(fsdis, fileSize, meta);
-    // Comparator class name is stored in the trailer in version 2.
+    // Comparator class name is stored in the trailer in version 3.
     CellComparator comparator = trailer.createComparator();
     HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
         new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator,
@@ -297,6 +297,5 @@ public class TestHFileWriterV3 {

     fsdis.close();
   }

 }
-
@@ -89,7 +89,7 @@ public class TestLazyDataBlockDecompression {
    */
   private static void writeHFile(Configuration conf, CacheConfig cc, FileSystem fs, Path path,
       HFileContext cxt, int entryCount) throws IOException {
-    HFile.Writer writer = new HFileWriterFactory(conf, cc)
+    HFile.Writer writer = new HFile.WriterFactory(conf, cc)
         .withPath(fs, path)
         .withFileContext(cxt)
         .create();
@@ -100,8 +100,8 @@ public class TestLazyDataBlockDecompression {
     final byte[] qualifier = Bytes.toBytes("q");

     for (int i = 0; i < entryCount; i++) {
-      byte[] keyBytes = TestHFileWriterV2.randomOrderedKey(rand, i);
-      byte[] valueBytes = TestHFileWriterV2.randomValue(rand);
+      byte[] keyBytes = RandomKeyValueUtil.randomOrderedKey(rand, i);
+      byte[] valueBytes = RandomKeyValueUtil.randomValue(rand);
       // make a real keyvalue so that hfile tool can examine it
       writer.append(new KeyValue(keyBytes, family, qualifier, valueBytes));
     }
@@ -111,8 +111,8 @@ public class TestPrefetch {

     final int rowLen = 32;
     for (int i = 0; i < NUM_KV; ++i) {
-      byte[] k = TestHFileWriterV2.randomOrderedKey(RNG, i);
-      byte[] v = TestHFileWriterV2.randomValue(RNG);
+      byte[] k = RandomKeyValueUtil.randomOrderedKey(RNG, i);
+      byte[] v = RandomKeyValueUtil.randomValue(RNG);
       int cfLen = RNG.nextInt(k.length - rowLen + 1);
       KeyValue kv = new KeyValue(
           k, 0, rowLen,
@@ -123,9 +123,9 @@ public class TestSeekBeforeWithInlineBlocks {
           .build();

       for (int i = 0; i < NUM_KV; i++) {
-        byte[] row = TestHFileWriterV2.randomOrderedKey(RAND, i);
-        byte[] qual = TestHFileWriterV2.randomRowOrQualifier(RAND);
-        byte[] value = TestHFileWriterV2.randomValue(RAND);
+        byte[] row = RandomKeyValueUtil.randomOrderedKey(RAND, i);
+        byte[] qual = RandomKeyValueUtil.randomRowOrQualifier(RAND);
+        byte[] value = RandomKeyValueUtil.randomValue(RAND);
         KeyValue kv = new KeyValue(row, FAM, qual, value);

         storeFileWriter.append(kv);
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.TableName;
@@ -49,7 +50,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -276,8 +276,8 @@ public class TestCacheOnWriteInSchema {
   private void writeStoreFile(StoreFile.Writer writer) throws IOException {
     final int rowLen = 32;
     for (int i = 0; i < NUM_KV; ++i) {
-      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
-      byte[] v = TestHFileWriterV2.randomValue(rand);
+      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
+      byte[] v = RandomKeyValueUtil.randomValue(rand);
       int cfLen = rand.nextInt(k.length - rowLen + 1);
       KeyValue kv = new KeyValue(
           k, 0, rowLen,
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.client.Scan;
@@ -53,7 +54,6 @@ import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.BloomFilterUtil;
@@ -144,7 +144,7 @@ public class TestCompoundBloomFilter {
   private List<KeyValue> createSortedKeyValues(Random rand, int n) {
     List<KeyValue> kvList = new ArrayList<KeyValue>(n);
     for (int i = 0; i < n; ++i)
-      kvList.add(TestHFileWriterV2.randomKeyValue(rand));
+      kvList.add(RandomKeyValueUtil.randomKeyValue(rand));
     Collections.sort(kvList, CellComparator.COMPARATOR);
     return kvList;
   }
@@ -231,7 +231,7 @@ public class TestCompoundBloomFilter {
     Random rand = new Random(EVALUATION_SEED);
     int nTrials = NUM_KV[t] * 10;
     for (int i = 0; i < nTrials; ++i) {
-      byte[] query = TestHFileWriterV2.randomRowOrQualifier(rand);
+      byte[] query = RandomKeyValueUtil.randomRowOrQualifier(rand);
       if (isInBloom(scanner, query, bt, rand)) {
         numFalsePos += 1;
       }
@@ -280,16 +280,16 @@ public class TestCompoundBloomFilter {

   private boolean isInBloom(StoreFileScanner scanner, byte[] row, BloomType bt,
       Random rand) {
-    return isInBloom(scanner, row, TestHFileWriterV2.randomRowOrQualifier(rand));
+    return isInBloom(scanner, row, RandomKeyValueUtil.randomRowOrQualifier(rand));
   }

   private boolean isInBloom(StoreFileScanner scanner, byte[] row,
       byte[] qualifier) {
     Scan scan = new Scan(row, row);
-    scan.addColumn(Bytes.toBytes(TestHFileWriterV2.COLUMN_FAMILY_NAME), qualifier);
+    scan.addColumn(Bytes.toBytes(RandomKeyValueUtil.COLUMN_FAMILY_NAME), qualifier);
     Store store = mock(Store.class);
     HColumnDescriptor hcd = mock(HColumnDescriptor.class);
-    when(hcd.getName()).thenReturn(Bytes.toBytes(TestHFileWriterV2.COLUMN_FAMILY_NAME));
+    when(hcd.getName()).thenReturn(Bytes.toBytes(RandomKeyValueUtil.COLUMN_FAMILY_NAME));
     when(store.getFamily()).thenReturn(hcd);
     return scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
   }
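For context, a minimal usage sketch of the API shape after this commit: tests build a writer through the now-concrete HFile.WriterFactory and draw random keys and values from RandomKeyValueUtil instead of the removed TestHFileWriterV2 helpers. This sketch is not part of the commit; the class and method names come from the hunks above, while the surrounding setup (local Configuration and FileSystem, the output path, entry count, and the example class name) is assumed for illustration.

import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;

public class WriterFactoryExample {          // hypothetical example class
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path hfilePath = new Path("/tmp/example.hfile");   // hypothetical output path

    HFileContext context = new HFileContextBuilder().withBlockSize(4096).build();

    // WriterFactory is concrete after this commit, so callers create a writer
    // directly instead of going through the removed HFileWriterFactory subclass.
    HFile.Writer writer = new HFile.WriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, hfilePath)
        .withFileContext(context)
        .create();

    // RandomKeyValueUtil replaces the static helpers that used to live in TestHFileWriterV2.
    Random rand = new Random(12345);
    for (int i = 0; i < 1000; ++i) {
      byte[] key = RandomKeyValueUtil.randomOrderedKey(rand, i); // keys increase with i, so appends stay sorted
      byte[] value = RandomKeyValueUtil.randomValue(rand);
      writer.append(new KeyValue(key, null, null, value));
    }
    writer.close();
  }
}

Note that RandomKeyValueUtil lives in the test source tree, so code like the above would compile against the test classes, as the modified tests in this diff do.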