HBASE-9247 Cleanup Key/KV/Meta/MetaKey Comparators
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1518817 13f79535-47bb-0310-9956-ffa450edef68
parent d122e7b349
commit 739f438176
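The diff below retires the assorted flat-key comparators (KeyValue.KeyComparator, MetaKeyComparator, RawKeyComparator, KeyValue.KEY_COMPARATOR) in favor of the KeyValue-level KVComparator family, with flat-key comparisons routed through compareFlatKey(...). A minimal sketch of the before/after call pattern — not part of the commit, assuming an HBase 0.95-era classpath; the row/family/qualifier values are illustrative only:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ComparatorCleanupSketch {
      public static void main(String[] args) {
        KeyValue a = new KeyValue(Bytes.toBytes("row-a"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("value-a"));
        KeyValue b = new KeyValue(Bytes.toBytes("row-b"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("value-b"));

        // Whole-KeyValue comparison, unchanged by this commit.
        int cellOrder = KeyValue.COMPARATOR.compare(a, b);

        // Flat-key comparison (row/family/qualifier/timestamp/type as serialized by getKey()).
        // Before: KeyValue.KEY_COMPARATOR.compare(aKey, 0, aKey.length, bKey, 0, bKey.length)
        // After:  the same KVComparator instance exposes compareFlatKey(...).
        byte[] aKey = a.getKey();
        byte[] bKey = b.getKey();
        int flatOrder = KeyValue.COMPARATOR.compareFlatKey(aKey, 0, aKey.length,
            bKey, 0, bKey.length);

        System.out.println("cellOrder=" + cellOrder + ", flatOrder=" + flatOrder);
      }
    }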
@@ -19,16 +19,6 @@
package org.apache.hadoop.hbase.client;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.SplitKeyValue;
import org.apache.hadoop.hbase.util.Bytes;

import java.nio.BufferOverflowException;
import java.nio.ByteBuffer;
import java.util.ArrayList;

@@ -39,6 +29,15 @@ import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Single row result of a {@link Get} or {@link Scan} query.<p>
 *

@@ -537,8 +536,7 @@ public class Result implements CellScannable {
}
this.familyMap = new TreeMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>(Bytes.BYTES_COMPARATOR);
for(KeyValue kv : this.kvs) {
SplitKeyValue splitKV = kv.split();
byte [] family = splitKV.getFamily();
byte [] family = kv.getFamily();
NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap =
familyMap.get(family);
if(columnMap == null) {

@@ -546,7 +544,7 @@ public class Result implements CellScannable {
(Bytes.BYTES_COMPARATOR);
familyMap.put(family, columnMap);
}
byte [] qualifier = splitKV.getQualifier();
byte [] qualifier = kv.getQualifier();
NavigableMap<Long, byte[]> versionMap = columnMap.get(qualifier);
if(versionMap == null) {
versionMap = new TreeMap<Long, byte[]>(new Comparator<Long>() {

@@ -556,8 +554,9 @@ public class Result implements CellScannable {
});
columnMap.put(qualifier, versionMap);
}
Long timestamp = Bytes.toLong(splitKV.getTimestamp());
byte [] value = splitKV.getValue();
Long timestamp = kv.getTimestamp();
byte [] value = kv.getValue();

versionMap.put(timestamp, value);
}
return this.familyMap;

File diff suppressed because it is too large

@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.util.Bytes;

/**

@@ -335,10 +335,10 @@ public final class TableName implements Comparable<TableName> {
*
* @return The comparator.
*/
public KeyComparator getRowComparator() {
public KVComparator getRowComparator() {
if(TableName.META_TABLE_NAME.equals(this)) {
return KeyValue.META_COMPARATOR.getRawComparator();
return KeyValue.META_COMPARATOR;
}
return KeyValue.COMPARATOR.getRawComparator();
return KeyValue.COMPARATOR;
}
}
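A short usage sketch of the TableName change above: callers now receive the KVComparator directly instead of the old raw KeyComparator. The caller code and the "users" table name are hypothetical, not from the commit:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValue.KVComparator;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowComparatorSketch {
      public static void main(String[] args) {
        TableName table = TableName.valueOf("users");          // illustrative table name
        KVComparator comparator = table.getRowComparator();    // was KeyComparator before

        // Compare two flat keys built for the first cell on each row.
        byte[] k1 = KeyValue.createFirstOnRow(Bytes.toBytes("row-1")).getKey();
        byte[] k2 = KeyValue.createFirstOnRow(Bytes.toBytes("row-2")).getKey();
        System.out.println(comparator.compareFlatKey(k1, 0, k1.length, k2, 0, k2.length));
      }
    }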
@@ -24,12 +24,12 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.KeyValue.SamePrefixComparator;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableUtils;

/**

@@ -113,14 +113,14 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
BufferedEncodedSeeker<STATE extends SeekerState>
implements EncodedSeeker {

protected final RawComparator<byte[]> comparator;
protected final KVComparator comparator;
protected final SamePrefixComparator<byte[]> samePrefixComparator;
protected ByteBuffer currentBuffer;
protected STATE current = createSeekerState(); // always valid
protected STATE previous = createSeekerState(); // may not be valid

@SuppressWarnings("unchecked")
public BufferedEncodedSeeker(RawComparator<byte[]> comparator) {
public BufferedEncodedSeeker(KVComparator comparator) {
this.comparator = comparator;
if (comparator instanceof SamePrefixComparator) {
this.samePrefixComparator = (SamePrefixComparator<byte[]>) comparator;

@@ -207,7 +207,7 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
comp = samePrefixComparator.compareIgnoringPrefix(commonPrefix, key,
offset, length, current.keyBuffer, 0, current.keyLength);
} else {
comp = comparator.compare(key, offset, length,
comp = comparator.compareFlatKey(key, offset, length,
current.keyBuffer, 0, current.keyLength);
}

@@ -22,9 +22,9 @@ import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.RawComparator;

/**
 * Just copy data, do not do any kind of compression. Use for comparison and

@@ -67,7 +67,7 @@ public class CopyKeyDataBlockEncoder extends BufferedDataBlockEncoder {
}

@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
public EncodedSeeker createSeeker(KVComparator comparator,
final boolean includesMemstoreTS) {
return new BufferedEncodedSeeker<SeekerState>(comparator) {
@Override

@@ -22,6 +22,7 @@ import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.io.RawComparator;

@@ -106,7 +107,7 @@ public interface DataBlockEncoder {
* @return A newly created seeker.
*/
EncodedSeeker createSeeker(
RawComparator<byte[]> comparator, boolean includesMemstoreTS
KVComparator comparator, boolean includesMemstoreTS
);

/**

@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.RawComparator;

@@ -422,7 +423,7 @@ public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder {
}

@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
public EncodedSeeker createSeeker(KVComparator comparator,
final boolean includesMemstoreTS) {
return new BufferedEncodedSeeker<DiffSeekerState>(comparator) {
private byte[] familyNameWithSize;

@@ -24,6 +24,7 @@ import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.RawComparator;

@@ -417,7 +418,7 @@ public class FastDiffDeltaEncoder extends BufferedDataBlockEncoder {
}

@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
public EncodedSeeker createSeeker(KVComparator comparator,
final boolean includesMemstoreTS) {
return new BufferedEncodedSeeker<FastDiffSeekerState>(comparator) {
private void decode(boolean isFirst) {

@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.RawComparator;

@@ -164,7 +165,7 @@ public class PrefixKeyDeltaEncoder extends BufferedDataBlockEncoder {
}

@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
public EncodedSeeker createSeeker(KVComparator comparator,
final boolean includesMemstoreTS) {
return new BufferedEncodedSeeker<SeekerState>(comparator) {
@Override
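With the createSeeker signature change above, data-block encoders are handed a KVComparator rather than a RawComparator<byte[]>. A minimal sketch, mirroring the test changes later in this commit; the FAST_DIFF encoding and includesMemstoreTS = false are arbitrary choices for illustration:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

    public class SeekerSketch {
      public static void main(String[] args) {
        DataBlockEncoder encoder = DataBlockEncoding.FAST_DIFF.getEncoder();
        // Old signature: createSeeker(RawComparator<byte[]>, boolean)
        // New signature: createSeeker(KVComparator, boolean)
        DataBlockEncoder.EncodedSeeker seeker =
            encoder.createSeeker(KeyValue.COMPARATOR, false);
        // An encoded block produced elsewhere would then be attached:
        // seeker.setCurrentBuffer(encodedBuffer);
      }
    }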
@@ -1575,7 +1575,7 @@ public class Bytes {
* ranging from -(N + 1) to N - 1.
*/
public static int binarySearch(byte [][]arr, byte []key, int offset,
int length, RawComparator<byte []> comparator) {
int length, RawComparator<?> comparator) {
int low = 0;
int high = arr.length - 1;
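The parameter is widened to RawComparator<?> so the reworked comparators can be passed without a cast while existing callers keep compiling. A sketch of a typical call, assuming a sorted array of raw byte[] keys and the stock Bytes.BYTES_RAWCOMPARATOR:

    import org.apache.hadoop.hbase.util.Bytes;

    public class BinarySearchSketch {
      public static void main(String[] args) {
        byte[][] sortedKeys = {
            Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), Bytes.toBytes("ccc") };
        byte[] probe = Bytes.toBytes("bbb");
        // offset/length select the probe slice; the comparator parameter is now RawComparator<?>.
        int idx = Bytes.binarySearch(sortedKeys, probe, 0, probe.length,
            Bytes.BYTES_RAWCOMPARATOR);
        System.out.println("index " + idx);   // 1 when the probe is found
      }
    }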
@@ -28,7 +28,6 @@ import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.KeyValue.MetaComparator;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.util.Bytes;

@@ -94,24 +93,13 @@ public class TestKeyValue extends TestCase {
final byte [] b = Bytes.toBytes("bbb");
final byte [] fam = Bytes.toBytes("col");
final byte [] qf = Bytes.toBytes("umn");
// final byte [] column = Bytes.toBytes("col:umn");
KeyValue aaa = new KeyValue(a, fam, qf, a);
KeyValue bbb = new KeyValue(b, fam, qf, b);
byte [] keyabb = aaa.getKey();
byte [] keybbb = bbb.getKey();
assertTrue(KeyValue.COMPARATOR.compare(aaa, bbb) < 0);
assertTrue(KeyValue.KEY_COMPARATOR.compare(keyabb, 0, keyabb.length, keybbb,
0, keybbb.length) < 0);
assertTrue(KeyValue.COMPARATOR.compare(bbb, aaa) > 0);
assertTrue(KeyValue.KEY_COMPARATOR.compare(keybbb, 0, keybbb.length, keyabb,
0, keyabb.length) > 0);
// Compare breaks if passed same ByteBuffer as both left and right arguments.
assertTrue(KeyValue.COMPARATOR.compare(bbb, bbb) == 0);
assertTrue(KeyValue.KEY_COMPARATOR.compare(keybbb, 0, keybbb.length, keybbb,
0, keybbb.length) == 0);
assertTrue(KeyValue.COMPARATOR.compare(aaa, aaa) == 0);
assertTrue(KeyValue.KEY_COMPARATOR.compare(keyabb, 0, keyabb.length, keyabb,
0, keyabb.length) == 0);
// Do compare with different timestamps.
aaa = new KeyValue(a, fam, qf, 1, a);
bbb = new KeyValue(a, fam, qf, 2, a);

@@ -299,7 +287,7 @@ public class TestKeyValue extends TestCase {
assertTrue(cmp > 0);
}

private void assertKVLessWithoutRow(KeyValue.KeyComparator c, int common, KeyValue less,
private void assertKVLessWithoutRow(KeyValue.KVComparator c, int common, KeyValue less,
KeyValue greater) {
int cmp = c.compareIgnoringPrefix(common, less.getBuffer(), less.getOffset()
+ KeyValue.ROW_OFFSET, less.getKeyLength(), greater.getBuffer(),

@@ -312,7 +300,7 @@ public class TestKeyValue extends TestCase {
}

public void testCompareWithoutRow() {
final KeyValue.KeyComparator c = KeyValue.KEY_COMPARATOR;
final KeyValue.KVComparator c = KeyValue.COMPARATOR;
byte[] row = Bytes.toBytes("row");

byte[] fa = Bytes.toBytes("fa");

@@ -462,14 +450,14 @@ public class TestKeyValue extends TestCase {
* See HBASE-7845
*/
public void testGetShortMidpointKey() {
final KeyComparator keyComparator = new KeyValue.KeyComparator();
final KVComparator keyComparator = KeyValue.COMPARATOR;
//verify that faked shorter rowkey could be generated
long ts = 5;
KeyValue kv1 = new KeyValue(Bytes.toBytes("the quick brown fox"), family, qualA, ts, Type.Put);
KeyValue kv2 = new KeyValue(Bytes.toBytes("the who test text"), family, qualA, ts, Type.Put);
byte[] newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
assertTrue(keyComparator.compare(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compare(newKey, kv2.getKey()) < 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) < 0);
short newRowLength = Bytes.toShort(newKey, 0);
byte[] expectedArray = Bytes.toBytes("the r");
Bytes.equals(newKey, KeyValue.ROW_LENGTH_SIZE, newRowLength, expectedArray, 0,

@@ -478,44 +466,44 @@ public class TestKeyValue extends TestCase {
//verify: same with "row + family + qualifier", return rightKey directly
kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 5, Type.Put);
kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 0, Type.Put);
assertTrue(keyComparator.compare(kv1.getKey(), kv2.getKey()) < 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
assertTrue(keyComparator.compare(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compare(newKey, kv2.getKey()) == 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) == 0);
kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, -5, Type.Put);
kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, -10, Type.Put);
assertTrue(keyComparator.compare(kv1.getKey(), kv2.getKey()) < 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
assertTrue(keyComparator.compare(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compare(newKey, kv2.getKey()) == 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) == 0);

// verify: same with row, different with qualifier
kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 5, Type.Put);
kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualB, 5, Type.Put);
assertTrue(keyComparator.compare(kv1.getKey(), kv2.getKey()) < 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
assertTrue(keyComparator.compare(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compare(newKey, kv2.getKey()) < 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) < 0);
KeyValue newKeyValue = KeyValue.createKeyValueFromKey(newKey);
assertTrue(Arrays.equals(newKeyValue.getFamily(),family));
assertTrue(Arrays.equals(newKeyValue.getQualifier(),qualB));
assertTrue(newKeyValue.getTimestamp() == HConstants.LATEST_TIMESTAMP);
assertTrue(newKeyValue.getType() == Type.Maximum.getCode());
assertTrue(newKeyValue.getTypeByte() == Type.Maximum.getCode());

//verify metaKeyComparator's getShortMidpointKey output
final KeyComparator metaKeyComparator = new KeyValue.MetaKeyComparator();
final KVComparator metaKeyComparator = KeyValue.META_COMPARATOR;
kv1 = new KeyValue(Bytes.toBytes("ilovehbase123"), family, qualA, 5, Type.Put);
kv2 = new KeyValue(Bytes.toBytes("ilovehbase234"), family, qualA, 0, Type.Put);
newKey = metaKeyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
assertTrue(metaKeyComparator.compare(kv1.getKey(), newKey) < 0);
assertTrue(metaKeyComparator.compare(newKey, kv2.getKey()) == 0);
assertTrue(metaKeyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
assertTrue(metaKeyComparator.compareFlatKey(newKey, kv2.getKey()) == 0);

//verify common fix scenario
kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, ts, Type.Put);
kv2 = new KeyValue(Bytes.toBytes("ilovehbaseandhdfs"), family, qualA, ts, Type.Put);
assertTrue(keyComparator.compare(kv1.getKey(), kv2.getKey()) < 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), kv2.getKey()) < 0);
newKey = keyComparator.getShortMidpointKey(kv1.getKey(), kv2.getKey());
assertTrue(keyComparator.compare(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compare(newKey, kv2.getKey()) == 0);
assertTrue(keyComparator.compareFlatKey(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compareFlatKey(newKey, kv2.getKey()) == 0);
}
}
@@ -25,8 +25,9 @@ import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.KeyValue.MetaKeyComparator;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.KeyValue.MetaComparator;
import org.apache.hadoop.hbase.KeyValue.RawBytesComparator;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.codec.prefixtree.decode.DecoderFactory;
import org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArraySearcher;

@@ -189,11 +190,10 @@ public class PrefixTreeCodec implements DataBlockEncoder{
* the way to this point.
*/
@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator, boolean includesMvccVersion) {
if(! (comparator instanceof KeyComparator)){
public EncodedSeeker createSeeker(KVComparator comparator, boolean includesMvccVersion) {
if (comparator instanceof RawBytesComparator){
throw new IllegalArgumentException("comparator must be KeyValue.KeyComparator");
}
if(comparator instanceof MetaKeyComparator){
} else if (comparator instanceof MetaComparator){
throw new IllegalArgumentException("DataBlockEncoding.PREFIX_TREE not compatible with META "
+"table");
}

@@ -160,7 +160,7 @@ public class HalfStoreFileReader extends StoreFile.Reader {
// constrain the bottom.
if (!top) {
ByteBuffer bb = getKey();
if (getComparator().compare(bb.array(), bb.arrayOffset(), bb.limit(),
if (getComparator().compareFlatKey(bb.array(), bb.arrayOffset(), bb.limit(),
splitkey, 0, splitkey.length) >= 0) {
atEnd = true;
return false;

@@ -179,13 +179,13 @@ public class HalfStoreFileReader extends StoreFile.Reader {
byte[] fk = getFirstKey();
// This will be null when the file is empty in which we can not seekBefore to any key
if (fk == null) return false;
if (getComparator().compare(key, offset, length, fk, 0,
if (getComparator().compareFlatKey(key, offset, length, fk, 0,
fk.length) <= 0) {
return false;
}
} else {
// The equals sign isn't strictly necessary just here to be consistent with seekTo
if (getComparator().compare(key, offset, length, splitkey, 0,
if (getComparator().compareFlatKey(key, offset, length, splitkey, 0,
splitkey.length) >= 0) {
return this.delegate.seekBefore(splitkey, 0, splitkey.length);
}

@@ -216,7 +216,7 @@ public class HalfStoreFileReader extends StoreFile.Reader {
// Check key.
ByteBuffer k = this.delegate.getKey();
return this.delegate.getReader().getComparator().
compare(k.array(), k.arrayOffset(), k.limit(),
compareFlatKey(k.array(), k.arrayOffset(), k.limit(),
splitkey, 0, splitkey.length) < 0;
}

@@ -226,12 +226,12 @@ public class HalfStoreFileReader extends StoreFile.Reader {

public int seekTo(byte[] key, int offset, int length) throws IOException {
if (top) {
if (getComparator().compare(key, offset, length, splitkey, 0,
if (getComparator().compareFlatKey(key, offset, length, splitkey, 0,
splitkey.length) < 0) {
return -1;
}
} else {
if (getComparator().compare(key, offset, length, splitkey, 0,
if (getComparator().compareFlatKey(key, offset, length, splitkey, 0,
splitkey.length) >= 0) {
// we would place the scanner in the second half.
// it might be an error to return false here ever...

@@ -256,12 +256,12 @@ public class HalfStoreFileReader extends StoreFile.Reader {
//This function is identical to the corresponding seekTo function except
//that we call reseekTo (and not seekTo) on the delegate.
if (top) {
if (getComparator().compare(key, offset, length, splitkey, 0,
if (getComparator().compareFlatKey(key, offset, length, splitkey, 0,
splitkey.length) < 0) {
return -1;
}
} else {
if (getComparator().compare(key, offset, length, splitkey, 0,
if (getComparator().compareFlatKey(key, offset, length, splitkey, 0,
splitkey.length) >= 0) {
// we would place the scanner in the second half.
// it might be an error to return false here ever...
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

@@ -71,7 +72,7 @@ public abstract class AbstractHFileReader implements HFile.Reader {
protected int avgValueLen = -1;

/** Key comparator */
protected RawComparator<byte []> comparator;
protected KVComparator comparator;

/** Size of this file. */
protected final long fileSize;

@@ -206,7 +207,7 @@ public abstract class AbstractHFileReader implements HFile.Reader {

/** @return comparator */
@Override
public RawComparator<byte []> getComparator() {
public KVComparator getComparator() {
return comparator;
}

@@ -33,7 +33,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.util.Bytes;

@@ -77,7 +77,7 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
protected long totalUncompressedBytes = 0;

/** Key comparator. Used to ensure we write in order. */
protected final KeyComparator comparator;
protected final KVComparator comparator;

/** Meta block names. */
protected List<byte[]> metaNames = new ArrayList<byte[]>();

@@ -114,7 +114,7 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
FSDataOutputStream outputStream, Path path, int blockSize,
Compression.Algorithm compressAlgo,
HFileDataBlockEncoder dataBlockEncoder,
KeyComparator comparator) {
KVComparator comparator) {
this.outputStream = outputStream;
this.path = path;
this.name = path != null ? path.getName() : outputStream.toString();

@@ -124,7 +124,7 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
this.blockEncoder = dataBlockEncoder != null
? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
this.comparator = comparator != null ? comparator
: KeyValue.KEY_COMPARATOR;
: KeyValue.COMPARATOR;

closeOutputStream = path != null;
this.cacheConf = cacheConf;

@@ -198,8 +198,9 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
throw new IOException("Key cannot be null or empty");
}
if (lastKeyBuffer != null) {
int keyComp = comparator.compare(lastKeyBuffer, lastKeyOffset,
int keyComp = comparator.compareFlatKey(lastKeyBuffer, lastKeyOffset,
lastKeyLength, key, offset, length);

if (keyComp > 0) {
throw new IOException("Added a key not lexically larger than"
+ " previous key="
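The writer's ordering guard above now runs compareFlatKey over the serialized keys. A small sketch of the same check outside the writer — hypothetical helper, assuming the keys come from KeyValue.getKey():

    import java.io.IOException;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AppendOrderSketch {
      // Mirrors the idea of the guard in AbstractHFileWriter: keys must be
      // appended in non-decreasing flat-key order.
      static void checkOrder(byte[] lastKey, byte[] nextKey) throws IOException {
        if (lastKey != null && KeyValue.COMPARATOR.compareFlatKey(
            lastKey, 0, lastKey.length, nextKey, 0, nextKey.length) > 0) {
          throw new IOException("Added a key not lexically larger than previous key");
        }
      }

      public static void main(String[] args) throws IOException {
        byte[] k1 = new KeyValue(Bytes.toBytes("row-1"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("v")).getKey();
        byte[] k2 = new KeyValue(Bytes.toBytes("row-2"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("v")).getKey();
        checkOrder(k1, k2);   // in order, no exception
      }
    }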
@@ -31,10 +31,10 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.protobuf.generated.HFileProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.RawComparator;

import com.google.common.io.NullOutputStream;

@@ -114,8 +114,8 @@ public class FixedFileTrailer {
*/
private long lastDataBlockOffset;

/** Raw key comparator class name in version 2 */
private String comparatorClassName = KeyValue.KEY_COMPARATOR.getClass().getName();
/** Raw key comparator class name in version 3 */
private String comparatorClassName = KeyValue.COMPARATOR.getLegacyKeyComparatorName();

/** The {@link HFile} format major version. */
private final int majorVersion;

@@ -214,6 +214,8 @@ public class FixedFileTrailer {
.setNumDataIndexLevels(numDataIndexLevels)
.setFirstDataBlockOffset(firstDataBlockOffset)
.setLastDataBlockOffset(lastDataBlockOffset)
// TODO this is a classname encoded into an HFile's trailer. We are going to need to have
// some compat code here.
.setComparatorClassName(comparatorClassName)
.setCompressionCodec(compressionCodec.ordinal())
.build().writeDelimitedTo(baos);

@@ -324,6 +326,8 @@ public class FixedFileTrailer {
lastDataBlockOffset = builder.getLastDataBlockOffset();
}
if (builder.hasComparatorClassName()) {
// TODO this is a classname encoded into an HFile's trailer. We are going to need to have
// some compat code here.
setComparatorClass(getComparatorClass(builder.getComparatorClassName()));
}
if (builder.hasCompressionCodec()) {

@@ -351,6 +355,8 @@ public class FixedFileTrailer {
numDataIndexLevels = input.readInt();
firstDataBlockOffset = input.readLong();
lastDataBlockOffset = input.readLong();
// TODO this is a classname encoded into an HFile's trailer. We are going to need to have
// some compat code here.
setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
MAX_COMPARATOR_NAME_LENGTH)));
}

@@ -555,30 +561,53 @@ public class FixedFileTrailer {
return minorVersion;
}

@SuppressWarnings("rawtypes")
public void setComparatorClass(Class<? extends RawComparator> klass) {
// Is the comparator instantiable
public void setComparatorClass(Class<? extends KVComparator> klass) {
// Is the comparator instantiable?
try {
klass.newInstance();
KVComparator comp = klass.newInstance();

// HFile V2 legacy comparator class names.
if (KeyValue.COMPARATOR.getClass().equals(klass)) {
comparatorClassName = KeyValue.COMPARATOR.getLegacyKeyComparatorName();
} else if (KeyValue.META_COMPARATOR.getClass().equals(klass)) {
comparatorClassName = KeyValue.META_COMPARATOR.getLegacyKeyComparatorName();
} else if (KeyValue.RAW_COMPARATOR.getClass().equals(klass)) {
comparatorClassName = KeyValue.RAW_COMPARATOR.getLegacyKeyComparatorName();
} else {
// if the name wasn't one of the legacy names, maybe its a legit new kind of comparator.
comparatorClassName = klass.getName();
}

} catch (Exception e) {
throw new RuntimeException("Comparator class " + klass.getName() +
" is not instantiable", e);
}
comparatorClassName = klass.getName();

}

@SuppressWarnings("unchecked")
private static Class<? extends RawComparator<byte[]>> getComparatorClass(
private static Class<? extends KVComparator> getComparatorClass(
String comparatorClassName) throws IOException {
try {
return (Class<? extends RawComparator<byte[]>>)
// HFile V2 legacy comparator class names.
if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName())) {
comparatorClassName = KeyValue.COMPARATOR.getClass().getName();
} else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName())) {
comparatorClassName = KeyValue.META_COMPARATOR.getClass().getName();
} else if (comparatorClassName.equals(KeyValue.RAW_COMPARATOR.getLegacyKeyComparatorName())) {
comparatorClassName = KeyValue.RAW_COMPARATOR.getClass().getName();
}

// if the name wasn't one of the legacy names, maybe its a legit new kind of comparator.

return (Class<? extends KVComparator>)
Class.forName(comparatorClassName);
} catch (ClassNotFoundException ex) {
throw new IOException(ex);
}
}

public static RawComparator<byte[]> createComparator(
public static KVComparator createComparator(
String comparatorClassName) throws IOException {
try {
return getComparatorClass(comparatorClassName).newInstance();

@@ -591,7 +620,7 @@ public class FixedFileTrailer {
}
}

RawComparator<byte[]> createComparator() throws IOException {
KVComparator createComparator() throws IOException {
expectAtLeastMajorVersion(2);
return createComparator(comparatorClassName);
}
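Because the trailer persists a comparator class name, the code above maps the legacy HFile V2 names to the new KVComparator classes in both directions. A usage sketch, assuming createComparator stays public static as shown in the hunk:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValue.KVComparator;
    import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;

    public class TrailerComparatorSketch {
      public static void main(String[] args) throws Exception {
        // A name read from an existing trailer; the current class name is used here,
        // but per the mapping above a legacy V2 name would resolve the same way.
        String storedName = KeyValue.COMPARATOR.getClass().getName();
        KVComparator comparator = FixedFileTrailer.createComparator(storedName);
        System.out.println("resolved " + comparator.getClass().getName());
      }
    }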
@@ -53,7 +53,7 @@ import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

@@ -336,7 +336,7 @@ public class HFile {
protected Compression.Algorithm compression =
HFile.DEFAULT_COMPRESSION_ALGORITHM;
protected HFileDataBlockEncoder encoder = NoOpDataBlockEncoder.INSTANCE;
protected KeyComparator comparator = KeyValue.KEY_COMPARATOR;
protected KVComparator comparator = KeyValue.COMPARATOR;
protected InetSocketAddress[] favoredNodes;
protected ChecksumType checksumType = HFile.DEFAULT_CHECKSUM_TYPE;
protected int bytesPerChecksum = DEFAULT_BYTES_PER_CHECKSUM;

@@ -384,7 +384,7 @@ public class HFile {
return this;
}

public WriterFactory withComparator(KeyComparator comparator) {
public WriterFactory withComparator(KVComparator comparator) {
Preconditions.checkNotNull(comparator);
this.comparator = comparator;
return this;

@@ -432,7 +432,7 @@ public class HFile {
FSDataOutputStream ostream, int blockSize,
Compression.Algorithm compress,
HFileDataBlockEncoder dataBlockEncoder,
KeyComparator comparator, ChecksumType checksumType,
KVComparator comparator, ChecksumType checksumType,
int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException;
}

@@ -489,7 +489,7 @@ public class HFile {
*/
String getName();

RawComparator<byte []> getComparator();
KVComparator getComparator();

HFileScanner getScanner(boolean cacheBlocks,
final boolean pread, final boolean isCompaction);
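A sketch of building a writer through the factory with the new comparator type; it follows the TestHFile pattern later in this commit, and the local-filesystem path, block size, and appended cell are placeholders, not part of the change:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class WriterFactorySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        FSDataOutputStream out = fs.create(new Path("/tmp/example.hfile")); // placeholder path

        // withComparator now takes a KVComparator (previously KeyValue.KEY_COMPARATOR).
        HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
            .withOutputStream(out)
            .withBlockSize(64 * 1024)
            .withComparator(KeyValue.COMPARATOR)
            .create();

        byte[] key = new KeyValue("row-1".getBytes(), "f".getBytes(),
            "q".getBytes(), "value-1".getBytes()).getKey();
        writer.append(key, "value-1".getBytes());
        writer.close();
      }
    }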
@@ -38,13 +38,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.StringUtils;

@@ -106,7 +106,7 @@ public class HFileBlockIndex {
*/
public static class BlockIndexReader implements HeapSize {
/** Needed doing lookup on blocks. */
private final RawComparator<byte[]> comparator;
private final KVComparator comparator;

// Root-level data.
private byte[][] blockKeys;

@@ -132,13 +132,13 @@ public class HFileBlockIndex {
/** A way to read {@link HFile} blocks at a given offset */
private CachingBlockReader cachingBlockReader;

public BlockIndexReader(final RawComparator<byte[]> c, final int treeLevel,
public BlockIndexReader(final KVComparator c, final int treeLevel,
final CachingBlockReader cachingBlockReader) {
this(c, treeLevel);
this.cachingBlockReader = cachingBlockReader;
}

public BlockIndexReader(final RawComparator<byte[]> c, final int treeLevel)
public BlockIndexReader(final KVComparator c, final int treeLevel)
{
comparator = c;
searchTreeLevel = treeLevel;

@@ -481,7 +481,7 @@ public class HFileBlockIndex {
*/
static int binarySearchNonRootIndex(byte[] key, int keyOffset,
int keyLength, ByteBuffer nonRootIndex,
RawComparator<byte[]> comparator) {
KVComparator comparator) {

int numEntries = nonRootIndex.getInt(0);
int low = 0;

@@ -516,7 +516,7 @@ public class HFileBlockIndex {

// we have to compare in this order, because the comparator order
// has special logic when the 'left side' is a special key.
int cmp = comparator.compare(key, keyOffset, keyLength,
int cmp = comparator.compareFlatKey(key, keyOffset, keyLength,
nonRootIndex.array(), nonRootIndex.arrayOffset() + midKeyOffset,
midLength);

@@ -568,7 +568,7 @@ public class HFileBlockIndex {
*
*/
static int locateNonRootIndexEntry(ByteBuffer nonRootBlock, byte[] key,
int keyOffset, int keyLength, RawComparator<byte[]> comparator) {
int keyOffset, int keyLength, KVComparator comparator) {
int entryIndex = binarySearchNonRootIndex(key, keyOffset, keyLength,
nonRootBlock, comparator);

@@ -120,7 +120,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
trailer.getNumDataIndexLevels(), this);
metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
Bytes.BYTES_RAWCOMPARATOR, 1);
KeyValue.RAW_COMPARATOR, 1);

// Parse load-on-open data.

@@ -500,7 +500,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
int compared;
if (isSeeked()) {
ByteBuffer bb = getKey();
compared = reader.getComparator().compare(key, offset,
compared = reader.getComparator().compareFlatKey(key, offset,
length, bb.array(), bb.arrayOffset(), bb.limit());
if (compared < 1) {
// If the required key is less than or equal to current key, then

@@ -509,7 +509,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
} else {
if (this.nextIndexedKey != null &&
(this.nextIndexedKey == HConstants.NO_NEXT_INDEXED_KEY ||
reader.getComparator().compare(key, offset, length,
reader.getComparator().compareFlatKey(key, offset, length,
nextIndexedKey, 0, nextIndexedKey.length) < 0)) {
// The reader shall continue to scan the current data block instead of querying the
// block index as long as it knows the target key is strictly smaller than

@@ -535,7 +535,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
}
ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock);

if (reader.getComparator().compare(firstKey.array(),
if (reader.getComparator().compareFlatKey(firstKey.array(),
firstKey.arrayOffset(), firstKey.limit(), key, offset, length) == 0)
{
long previousBlockOffset = seekToBlock.getPrevBlockOffset();

@@ -851,7 +851,7 @@ public class HFileReaderV2 extends AbstractHFileReader {

int keyOffset = blockBuffer.arrayOffset() + blockBuffer.position()
+ KEY_VALUE_LEN_SIZE;
int comp = reader.getComparator().compare(key, offset, length,
int comp = reader.getComparator().compareFlatKey(key, offset, length,
blockBuffer.array(), keyOffset, klen);

if (comp == 0) {

@@ -33,7 +33,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;

@@ -100,7 +100,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
public Writer createWriter(FileSystem fs, Path path,
FSDataOutputStream ostream, int blockSize,
Compression.Algorithm compress, HFileDataBlockEncoder blockEncoder,
final KeyComparator comparator, final ChecksumType checksumType,
final KVComparator comparator, final ChecksumType checksumType,
final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
return new HFileWriterV2(conf, cacheConf, fs, path, ostream, blockSize, compress,
blockEncoder, comparator, checksumType, bytesPerChecksum, includeMVCCReadpoint);

@@ -111,7 +111,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
final KeyComparator comparator, final ChecksumType checksumType,
final KVComparator comparator, final ChecksumType checksumType,
final int bytesPerChecksum, final boolean includeMVCCReadpoint) throws IOException {
super(cacheConf,
ostream == null ? createOutputStream(conf, fs, path, null) : ostream,
@@ -74,7 +74,7 @@ class GetClosestRowBeforeTracker {
this.tablenamePlusDelimiterLength = metaregion? l + 1: -1;
this.oldestts = System.currentTimeMillis() - ttl;
this.kvcomparator = c;
KeyValue.RowComparator rc = new KeyValue.RowComparator(this.kvcomparator);
KeyValue.RowOnlyComparator rc = new KeyValue.RowOnlyComparator(this.kvcomparator);
this.deletes = new TreeMap<KeyValue, NavigableSet<KeyValue>>(rc);
}

@@ -529,7 +529,7 @@ public class HRegionFileSystem {
byte[] lastKey = f.createReader().getLastKey();
// If lastKey is null means storefile is empty.
if (lastKey == null) return null;
if (f.getReader().getComparator().compare(splitKey.getBuffer(),
if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
return null;
}

@@ -539,7 +539,7 @@ public class HRegionFileSystem {
byte[] firstKey = f.createReader().getFirstKey();
// If firstKey is null means storefile is empty.
if (firstKey == null) return null;
if (f.getReader().getComparator().compare(splitKey.getBuffer(),
if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
return null;
}

@@ -81,7 +81,7 @@ public class ScanQueryMatcher {
private final KeyValue startKey;

/** Row comparator for the region this query is for */
private final KeyValue.KeyComparator rowComparator;
private final KeyValue.KVComparator rowComparator;

/* row is not private for tests */
/** Row the query is on */

@@ -145,7 +145,7 @@ public class ScanQueryMatcher {
NavigableSet<byte[]> columns, ScanType scanType,
long readPointToUse, long earliestPutTs, long oldestUnexpiredTS) {
this.tr = scan.getTimeRange();
this.rowComparator = scanInfo.getComparator().getRawComparator();
this.rowComparator = scanInfo.getComparator();
this.deletes = new ScanDeleteTracker();
this.stopRow = scan.getStopRow();
this.startKey = KeyValue.createFirstDeleteFamilyOnRow(scan.getStartRow(),

@@ -798,7 +798,7 @@ public class StoreFile {
.withBlockSize(blocksize)
.withCompression(compress)
.withDataBlockEncoder(this.dataBlockEncoder)
.withComparator(comparator.getRawComparator())
.withComparator(comparator)
.withChecksumType(checksumType)
.withBytesPerChecksum(bytesPerChecksum)
.withFavoredNodes(favoredNodes)

@@ -877,7 +877,7 @@ public class StoreFile {
* @param kv
*/
public void trackTimestamps(final KeyValue kv) {
if (KeyValue.Type.Put.getCode() == kv.getType()) {
if (KeyValue.Type.Put.getCode() == kv.getTypeByte()) {
earliestPutTs = Math.min(earliestPutTs, kv.getTimestamp());
}
if (!isTimeRangeTrackerSet) {

@@ -939,7 +939,7 @@ public class StoreFile {
}
generalBloomFilterWriter.add(bloomKey, bloomKeyOffset, bloomKeyLen);
if (lastBloomKey != null
&& generalBloomFilterWriter.getComparator().compare(bloomKey,
&& generalBloomFilterWriter.getComparator().compareFlatKey(bloomKey,
bloomKeyOffset, bloomKeyLen, lastBloomKey,
lastBloomKeyOffset, lastBloomKeyLen) <= 0) {
throw new IOException("Non-increasing Bloom keys: "

@@ -1105,7 +1105,7 @@ public class StoreFile {
this.reader = null;
}

public RawComparator<byte []> getComparator() {
public KVComparator getComparator() {
return reader.getComparator();
}

@@ -1333,7 +1333,7 @@ public class StoreFile {
// from the file info. For row-column Bloom filters this is not yet
// a sufficient condition to return false.
boolean keyIsAfterLast = lastBloomKey != null
&& bloomFilter.getComparator().compare(key, lastBloomKey) > 0;
&& bloomFilter.getComparator().compareFlatKey(key, lastBloomKey) > 0;

if (bloomFilterType == BloomType.ROWCOL) {
// Since a Row Delete is essentially a DeleteFamily applied to all

@@ -1344,7 +1344,7 @@ public class StoreFile {
null, 0, 0);

if (keyIsAfterLast
&& bloomFilter.getComparator().compare(rowBloomKey,
&& bloomFilter.getComparator().compareFlatKey(rowBloomKey,
lastBloomKey) > 0) {
exists = false;
} else {

@@ -1388,9 +1388,9 @@ public class StoreFile {
}
KeyValue startKeyValue = KeyValue.createFirstOnRow(scan.getStartRow());
KeyValue stopKeyValue = KeyValue.createLastOnRow(scan.getStopRow());
boolean nonOverLapping = (getComparator().compare(this.getFirstKey(),
boolean nonOverLapping = (getComparator().compareFlatKey(this.getFirstKey(),
stopKeyValue.getKey()) > 0 && !Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW))
|| getComparator().compare(this.getLastKey(), startKeyValue.getKey()) < 0;
|| getComparator().compareFlatKey(this.getLastKey(), startKeyValue.getKey()) < 0;
return !nonOverLapping;
}
@@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.util;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.hbase.KeyValue.KVComparator;

/**
 * Common methods Bloom filter methods required at read and write time.

@@ -52,6 +52,6 @@ public interface BloomFilterBase {
/**
* @return Bloom key comparator
*/
RawComparator<byte[]> getComparator();
KVComparator getComparator();

}

@@ -200,7 +200,7 @@ public final class BloomFilterFactory {
// In case of compound Bloom filters we ignore the maxKeys hint.
CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
bloomType == BloomType.ROWCOL ? KeyValue.KEY_COMPARATOR : Bytes.BYTES_RAWCOMPARATOR);
bloomType == BloomType.ROWCOL ? KeyValue.COMPARATOR : KeyValue.RAW_COMPARATOR);
writer.addInlineBlockWriter(bloomWriter);
return bloomWriter;
}

@@ -231,7 +231,7 @@ public final class BloomFilterFactory {
// In case of compound Bloom filters we ignore the maxKeys hint.
CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
Bytes.BYTES_RAWCOMPARATOR);
KeyValue.RAW_COMPARATOR);
writer.addInlineBlockWriter(bloomWriter);
return bloomWriter;
}
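The factory above now picks between the two surviving comparators by Bloom type: row+column Bloom keys are flat KeyValue keys (KeyValue.COMPARATOR), plain row/delete-family Bloom keys are raw bytes (KeyValue.RAW_COMPARATOR). A minimal sketch of that selection; the BloomType import location is assumed, and the helper method is hypothetical:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValue.KVComparator;
    import org.apache.hadoop.hbase.regionserver.BloomType;   // assumed package for BloomType

    public class BloomComparatorSketch {
      // Mirrors the ternary used in BloomFilterFactory above.
      static KVComparator comparatorFor(BloomType bloomType) {
        return bloomType == BloomType.ROWCOL
            ? KeyValue.COMPARATOR
            : KeyValue.RAW_COMPARATOR;
      }

      public static void main(String[] args) {
        System.out.println(comparatorFor(BloomType.ROWCOL).getClass().getSimpleName());
        System.out.println(comparatorFor(BloomType.ROW).getClass().getSimpleName());
      }
    }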
@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.util;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;

@@ -625,8 +627,9 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter {
}

@Override
public RawComparator<byte[]> getComparator() {
return Bytes.BYTES_RAWCOMPARATOR;
public KVComparator getComparator() {
// return Bytes.BYTES_RAWCOMPARATOR;
return KeyValue.RAW_COMPARATOR;
}

/**

@@ -24,12 +24,12 @@ import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
import org.apache.hadoop.io.RawComparator;

/**
 * A Bloom filter implementation built on top of {@link ByteBloomFilter},

@@ -131,7 +131,7 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase
}

@Override
public RawComparator<byte[]> getComparator() {
public KVComparator getComparator() {
return comparator;
}

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.util;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.io.RawComparator;

@InterfaceAudience.Private

@@ -51,7 +52,7 @@ public class CompoundBloomFilterBase implements BloomFilterBase {
protected int hashType;

/** Comparator used to compare Bloom filter keys */
protected RawComparator<byte[]> comparator;
protected KVComparator comparator;

@Override
public long getMaxKeys() {

@@ -89,7 +90,7 @@ public class CompoundBloomFilterBase implements BloomFilterBase {
}

@Override
public RawComparator<byte[]> getComparator() {
public KVComparator getComparator() {
return comparator;
}

@@ -28,10 +28,10 @@ import java.util.Queue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
import org.apache.hadoop.hbase.io.hfile.InlineBlockWriter;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;

/**

@@ -89,7 +89,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
*/
public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate,
int hashType, int maxFold, boolean cacheOnWrite,
RawComparator<byte[]> comparator) {
KVComparator comparator) {
chunkByteSize = ByteBloomFilter.computeFoldableByteSize(
chunkByteSizeHint * 8L, maxFold);
@ -185,7 +185,7 @@ public class TestDataBlockEncoders {
|
|||
ByteBuffer.wrap(encodeBytes(encoding, originalBuffer));
|
||||
DataBlockEncoder encoder = encoding.getEncoder();
|
||||
DataBlockEncoder.EncodedSeeker seeker =
|
||||
encoder.createSeeker(KeyValue.KEY_COMPARATOR, includesMemstoreTS);
|
||||
encoder.createSeeker(KeyValue.COMPARATOR, includesMemstoreTS);
|
||||
seeker.setCurrentBuffer(encodedBuffer);
|
||||
encodedSeekers.add(seeker);
|
||||
}
|
||||
|
@ -240,7 +240,7 @@ public class TestDataBlockEncoders {
|
|||
"Bug while encoding using '%s'", encoder.toString()), e);
|
||||
}
|
||||
DataBlockEncoder.EncodedSeeker seeker =
|
||||
encoder.createSeeker(KeyValue.KEY_COMPARATOR, includesMemstoreTS);
|
||||
encoder.createSeeker(KeyValue.COMPARATOR, includesMemstoreTS);
|
||||
seeker.setCurrentBuffer(encodedBuffer);
|
||||
int i = 0;
|
||||
do {
|
||||
|
|
|
@ -77,7 +77,7 @@ public class TestPrefixTreeEncoding {
|
|||
HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
|
||||
Algorithm.NONE, DataBlockEncoding.PREFIX_TREE, new byte[0]);
|
||||
encoder.encodeKeyValues(dataBuffer, false, blkEncodingCtx);
|
||||
EncodedSeeker seeker = encoder.createSeeker(KeyValue.KEY_COMPARATOR, false);
|
||||
EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, false);
|
||||
byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
|
||||
ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes,
|
||||
DataBlockEncoding.ID_SIZE, onDiskBytes.length
|
||||
|
@ -117,7 +117,7 @@ public class TestPrefixTreeEncoding {
|
|||
HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
|
||||
Algorithm.NONE, DataBlockEncoding.PREFIX_TREE, new byte[0]);
|
||||
encoder.encodeKeyValues(dataBuffer, false, blkEncodingCtx);
|
||||
EncodedSeeker seeker = encoder.createSeeker(KeyValue.KEY_COMPARATOR, false);
|
||||
EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, false);
|
||||
byte[] onDiskBytes=blkEncodingCtx.getOnDiskBytesWithHeader();
|
||||
ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes,
|
||||
DataBlockEncoding.ID_SIZE, onDiskBytes.length
|
||||
|
@ -143,7 +143,7 @@ public class TestPrefixTreeEncoding {
|
|||
HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
|
||||
Algorithm.NONE, DataBlockEncoding.PREFIX_TREE, new byte[0]);
|
||||
encoder.encodeKeyValues(dataBuffer, false, blkEncodingCtx);
|
||||
EncodedSeeker seeker = encoder.createSeeker(KeyValue.KEY_COMPARATOR, false);
|
||||
EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, false);
|
||||
byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
|
||||
ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes,
|
||||
DataBlockEncoding.ID_SIZE, onDiskBytes.length
|
||||
|
@ -159,7 +159,7 @@ public class TestPrefixTreeEncoding {
|
|||
HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
|
||||
Algorithm.NONE, DataBlockEncoding.PREFIX_TREE, new byte[0]);
|
||||
encoder.encodeKeyValues(dataBuffer, false, blkEncodingCtx);
|
||||
EncodedSeeker seeker = encoder.createSeeker(KeyValue.KEY_COMPARATOR,
|
||||
EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
|
||||
false);
|
||||
byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
|
||||
ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes,
|
||||
|
|
|
@ -92,7 +92,7 @@ public class TestFixedFileTrailer {
|
|||
|
||||
t.setLastDataBlockOffset(291);
|
||||
t.setNumDataIndexLevels(3);
|
||||
t.setComparatorClass(KeyValue.KEY_COMPARATOR.getClass());
|
||||
t.setComparatorClass(KeyValue.COMPARATOR.getClass());
|
||||
t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic.
|
||||
t.setUncompressedDataIndexSize(827398717L); // Something random.
|
||||
|
||||
|
@ -209,7 +209,7 @@ public class TestFixedFileTrailer {
|
|||
assertEquals(expected.getFirstDataBlockOffset(),
|
||||
loaded.getFirstDataBlockOffset());
|
||||
assertTrue(
|
||||
expected.createComparator() instanceof KeyValue.KeyComparator);
|
||||
expected.createComparator() instanceof KeyValue.KVComparator);
|
||||
assertEquals(expected.getUncompressedDataIndexSize(),
|
||||
loaded.getUncompressedDataIndexSize());
|
||||
}
|
||||
|
|
|
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;

@@ -216,7 +215,7 @@ public class TestHFile extends HBaseTestCase {
.withBlockSize(minBlockSize)
.withCompression(codec)
// NOTE: This test is dependent on this deprecated nonstandard comparator
.withComparator(new KeyValue.RawKeyComparator())
.withComparator(new KeyValue.RawBytesComparator())
.create();
LOG.info(writer);
writeRecords(writer);

@@ -350,36 +349,5 @@ public class TestHFile extends HBaseTestCase {
assertTrue(Compression.Algorithm.LZ4.ordinal() == 4);
}

// This can't be an anonymous class because the compiler will not generate
// a nullary constructor for it.
static class CustomKeyComparator extends KeyComparator {
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
int l2) {
return -Bytes.compareTo(b1, s1, l1, b2, s2, l2);
}
@Override
public int compare(byte[] o1, byte[] o2) {
return compare(o1, 0, o1.length, o2, 0, o2.length);
}
}

public void testComparator() throws IOException {
if (cacheConf == null) cacheConf = new CacheConfig(conf);
Path mFile = new Path(ROOT_DIR, "meta.tfile");
FSDataOutputStream fout = createFSOutput(mFile);
KeyComparator comparator = new CustomKeyComparator();
Writer writer = HFile.getWriterFactory(conf, cacheConf)
.withOutputStream(fout)
.withBlockSize(minBlockSize)
.withComparator(comparator)
.create();
writer.append("3".getBytes(), "0".getBytes());
writer.append("2".getBytes(), "0".getBytes());
writer.append("1".getBytes(), "0".getBytes());
writer.close();
}

}
@@ -42,11 +42,13 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexChunk;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.junit.Before;

@@ -173,7 +175,7 @@ public class TestHFileBlockIndex {
BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
HFileBlockIndex.BlockIndexReader indexReader =
new HFileBlockIndex.BlockIndexReader(
Bytes.BYTES_RAWCOMPARATOR, numLevels, brw);
KeyValue.RAW_COMPARATOR, numLevels, brw);

indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

@@ -355,7 +357,7 @@ public class TestHFileBlockIndex {

int searchResult = BlockIndexReader.binarySearchNonRootIndex(
arrayHoldingKey, searchKey.length / 2, searchKey.length, nonRootIndex,
Bytes.BYTES_RAWCOMPARATOR);
KeyValue.RAW_COMPARATOR);
String lookupFailureMsg = "Failed to look up key #" + i + " ("
+ Bytes.toStringBinary(searchKey) + ")";
@@ -381,7 +383,7 @@ public class TestHFileBlockIndex {
// higher-level API functions.
boolean locateBlockResult =
(BlockIndexReader.locateNonRootIndexEntry(nonRootIndex, arrayHoldingKey,
searchKey.length / 2, searchKey.length, Bytes.BYTES_RAWCOMPARATOR) != -1);
searchKey.length / 2, searchKey.length, KeyValue.RAW_COMPARATOR) != -1);

if (i == 0) {
assertFalse(locateBlockResult);
@@ -441,7 +443,7 @@ public class TestHFileBlockIndex {
long expected = ClassSize.estimateBase(cl, false);

HFileBlockIndex.BlockIndexReader bi =
new HFileBlockIndex.BlockIndexReader(Bytes.BYTES_RAWCOMPARATOR, 1);
new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
long actual = bi.heapSize();

// Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,

@@ -506,7 +508,7 @@ public class TestHFileBlockIndex {
keyStrSet.add(Bytes.toStringBinary(k));

if (i > 0) {
assertTrue(KeyValue.KEY_COMPARATOR.compare(keys[i - 1],
assertTrue(KeyValue.COMPARATOR.compareFlatKey(keys[i - 1],
keys[i]) < 0);
}
}
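
Throughout TestHFileBlockIndex, Bytes.BYTES_RAWCOMPARATOR gives way to KeyValue.RAW_COMPARATOR and flat-key ordering checks move to compareFlatKey. A sketch under those assumptions; the class and method names are illustrative, and the BlockIndexReader constructor is used exactly as in the test, which may require the same package visibility:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;

public class BlockIndexComparatorSketch {
  // A single-level index reader wired to the raw-bytes comparator,
  // mirroring the constructor call changed above.
  static HFileBlockIndex.BlockIndexReader newRawIndexReader() {
    return new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
  }

  // Flat serialized keys are ordered via compareFlatKey on the standard comparator.
  static boolean strictlyIncreasing(byte[][] flatKeys) {
    for (int i = 1; i < flatKeys.length; i++) {
      if (KeyValue.COMPARATOR.compareFlatKey(flatKeys[i - 1], flatKeys[i]) >= 0) {
        return false;
      }
    }
    return true;
  }
}
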
@@ -166,7 +166,7 @@ public class TestHFilePerformance extends TestCase {
.withOutputStream(fout)
.withBlockSize(minBlockSize)
.withCompression(codecName)
.withComparator(new KeyValue.RawKeyComparator())
.withComparator(new KeyValue.RawBytesComparator())
.create();

// Writing value in one shot.
@@ -131,7 +131,7 @@ public class TestHFileSeek extends TestCase {
.withOutputStream(fout)
.withBlockSize(options.minBlockSize)
.withCompression(options.compress)
.withComparator(new KeyValue.RawKeyComparator())
.withComparator(new KeyValue.RawBytesComparator())
.create();
try {
BytesWritable key = new BytesWritable();
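
The writer-side tests switch from the removed RawKeyComparator to KeyValue.RawBytesComparator. A sketch of the builder pattern using only the factory calls that appear in this patch; conf, cacheConf, fout and minBlockSize stand in for objects the tests set up elsewhere:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class RawBytesWriterSketch {
  // Sketch: write a small HFile ordered by raw byte[] comparison.
  static void writeRawOrdered(Configuration conf, CacheConfig cacheConf,
      FSDataOutputStream fout, int minBlockSize) throws java.io.IOException {
    HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withBlockSize(minBlockSize)
        // Raw byte ordering; replaces the deprecated RawKeyComparator removed by this patch.
        .withComparator(new KeyValue.RawBytesComparator())
        .create();
    writer.append("1".getBytes(), "one".getBytes());
    writer.append("2".getBytes(), "two".getBytes());
    writer.close();
  }
}
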
@@ -39,13 +39,13 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.junit.Before;

@@ -137,13 +137,13 @@ public class TestHFileWriterV2 {
HFileBlock.FSReader blockReader =
new HFileBlock.FSReaderV2(fsdis, compressAlgo, fileSize);
// Comparator class name is stored in the trailer in version 2.
RawComparator<byte []> comparator = trailer.createComparator();
KVComparator comparator = trailer.createComparator();
HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
new HFileBlockIndex.BlockIndexReader(comparator,
trailer.getNumDataIndexLevels());
HFileBlockIndex.BlockIndexReader metaBlockIndexReader =
new HFileBlockIndex.BlockIndexReader(
Bytes.BYTES_RAWCOMPARATOR, 1);
KeyValue.RAW_COMPARATOR, 1);

HFileBlock.BlockIterator blockIter = blockReader.blockRange(
trailer.getLoadOnOpenDataOffset(),
@@ -51,7 +51,7 @@ public class TestReseekTo {
.withOutputStream(fout)
.withBlockSize(4000)
// NOTE: This test is dependent on this deprecated nonstandard comparator
.withComparator(new KeyValue.RawKeyComparator())
.withComparator(new KeyValue.RawBytesComparator())
.create();
int numberOfKeys = 1000;
@@ -23,7 +23,6 @@ import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.RawComparator;
import org.junit.experimental.categories.Category;

@@ -51,7 +50,7 @@ public class TestSeekTo extends HBaseTestCase {
.withOutputStream(fout)
.withBlockSize(blocksize)
// NOTE: This test is dependent on this deprecated nonstandard comparator
.withComparator(new KeyValue.RawKeyComparator())
.withComparator(KeyValue.RAW_COMPARATOR)
.create();
// 4 bytes * 3 * 2 for each key/value +
// 3 for keys, 15 for values = 42 (woot)
@@ -536,9 +536,10 @@ public class TestMemStore extends TestCase {
List<KeyValue> results = new ArrayList<KeyValue>();
for (int i = 0; scanner.next(results); i++) {
int rowId = startRowId + i;
KeyValue left = results.get(0);
byte[] row1 = Bytes.toBytes(rowId);
assertTrue("Row name",
KeyValue.COMPARATOR.compareRows(results.get(0),
Bytes.toBytes(rowId)) == 0);
KeyValue.COMPARATOR.compareRows(left.getBuffer(), left.getRowOffset(), (int) left.getRowLength(), row1, 0, row1.length) == 0);
assertEquals("Count of columns", QUALIFIER_COUNT, results.size());
List<KeyValue> row = new ArrayList<KeyValue>();
for (KeyValue kv : results) {
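
With the KeyValue-versus-byte[] overload gone, row comparison against a bare row key goes through the (buffer, offset, length) form of compareRows, as in the TestMemStore hunk above. A minimal sketch, assuming only the KeyValue accessors used there (the class and values are illustrative):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareRowsSketch {
  // Sketch: row-only equality between a KeyValue and a plain row byte[].
  static boolean sameRow(KeyValue kv, byte[] row) {
    return KeyValue.COMPARATOR.compareRows(
        kv.getBuffer(), kv.getRowOffset(), (int) kv.getRowLength(),
        row, 0, row.length) == 0;
  }

  public static void main(String[] args) {
    KeyValue kv = new KeyValue(Bytes.toBytes("row-7"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    System.out.println(sameRow(kv, Bytes.toBytes("row-7"))); // true
  }
}
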
@@ -327,7 +327,7 @@ public class TestStoreFile extends HBaseTestCase {
(topScanner.isSeeked() && topScanner.next())) {
key = topScanner.getKey();

if (topScanner.getReader().getComparator().compare(key.array(),
if (topScanner.getReader().getComparator().compareFlatKey(key.array(),
key.arrayOffset(), key.limit(), midkey, 0, midkey.length) < 0) {
fail("key=" + Bytes.toStringBinary(key) + " < midkey=" +
Bytes.toStringBinary(midkey));
@@ -377,7 +377,7 @@ public class TestStoreFile extends HBaseTestCase {
while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
topScanner.next()) {
key = topScanner.getKey();
assertTrue(topScanner.getReader().getComparator().compare(key.array(),
assertTrue(topScanner.getReader().getComparator().compareFlatKey(key.array(),
key.arrayOffset(), key.limit(), badmidkey, 0, badmidkey.length) >= 0);
if (first) {
first = false;