HBASE-18945 Make an IA.LimitedPrivate interface for CellComparator (Ram)

This commit is contained in:
Ramkrishna 2017-10-17 23:17:07 +05:30
parent b96e2f055d
commit 2cb64fb467
127 changed files with 1278 additions and 1059 deletions

View File

@ -37,7 +37,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
@ -255,7 +254,7 @@ public final class BackupSystemTable implements Closeable {
res.advance();
byte[] row = CellUtil.cloneRow(res.listCells().get(0));
for (Cell cell : res.listCells()) {
if (CellComparator.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0) {
map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
}
@ -284,13 +283,13 @@ public final class BackupSystemTable implements Closeable {
byte[] fam = null;
String path = null;
for (Cell cell : res.listCells()) {
if (CellComparator.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
if (CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
BackupSystemTable.TBL_COL.length) == 0) {
tbl = TableName.valueOf(CellUtil.cloneValue(cell));
} else if (CellComparator.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
BackupSystemTable.FAM_COL.length) == 0) {
fam = CellUtil.cloneValue(cell);
} else if (CellComparator.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0) {
path = Bytes.toString(CellUtil.cloneValue(cell));
}
@ -436,13 +435,13 @@ public final class BackupSystemTable implements Closeable {
rows.add(row);
String rowStr = Bytes.toString(row);
region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr);
if (CellComparator.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
BackupSystemTable.FAM_COL.length) == 0) {
fam = Bytes.toString(CellUtil.cloneValue(cell));
} else if (CellComparator.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
BackupSystemTable.PATH_COL.length) == 0) {
path = Bytes.toString(CellUtil.cloneValue(cell));
} else if (CellComparator.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
} else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
BackupSystemTable.STATE_COL.length) == 0) {
byte[] state = CellUtil.cloneValue(cell);
if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {

View File

@ -39,7 +39,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -315,7 +315,8 @@ public final class ConnectionUtils {
return result;
}
Cell[] rawCells = result.rawCells();
int index = Arrays.binarySearch(rawCells, keepCellsAfter, CellComparator::compareWithoutRow);
int index =
Arrays.binarySearch(rawCells, keepCellsAfter, CellComparatorImpl.COMPARATOR::compareWithoutRow);
if (index < 0) {
index = -index - 1;
} else {

View File

@ -29,7 +29,6 @@ import java.util.TreeMap;
import java.util.UUID;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;

View File

@ -35,6 +35,7 @@ import java.util.TreeMap;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
@ -212,14 +213,14 @@ public class Result implements CellScannable, CellScanner {
* Return the array of Cells backing this Result instance.
*
* The array is sorted from smallest -&gt; largest using the
* {@link CellComparator#COMPARATOR}.
* {@link CellComparator}.
*
* The array only contains what your Get or Scan specifies and no more.
* For example if you request column "A" 1 version you will have at most 1
* Cell in the array. If you request column "A" with 2 version you will
* have at most 2 Cells, with the first one being the newer timestamp and
* the second being the older timestamp (this is the sort order defined by
* {@link CellComparator#COMPARATOR}). If columns don't exist, they won't be
* {@link CellComparator}). If columns don't exist, they won't be
* present in the result. Therefore if you ask for 1 version all columns,
* it is safe to iterate over this array and expect to see 1 Cell for
* each column and no more.
@ -245,7 +246,7 @@ public class Result implements CellScannable, CellScanner {
/**
* Return the Cells for the specific column. The Cells are sorted in
* the {@link CellComparator#COMPARATOR} order. That implies the first entry in
* the {@link CellComparator} order. That implies the first entry in
* the list is the most recent column. If the query (Scan or Get) only
* requested 1 version the list will contain at most 1 entry. If the column
* did not exist in the result set (either the column does not exist
@ -302,7 +303,7 @@ public class Result implements CellScannable, CellScanner {
qualifierNotNull, 0, qualifierNotNull.length);
// pos === ( -(insertion point) - 1)
int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.COMPARATOR);
int pos = Arrays.binarySearch(kvs, searchTerm, CellComparatorImpl.COMPARATOR);
// never will exact match
if (pos < 0) {
pos = (pos+1) * -1;
@ -347,7 +348,7 @@ public class Result implements CellScannable, CellScanner {
qualifier, qoffset, qlength);
// pos === ( -(insertion point) - 1)
int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.COMPARATOR);
int pos = Arrays.binarySearch(kvs, searchTerm, CellComparatorImpl.COMPARATOR);
// never will exact match
if (pos < 0) {
pos = (pos+1) * -1;

View File

@ -22,7 +22,6 @@ import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@ -120,7 +119,7 @@ public class ColumnPaginationFilter extends FilterBase {
int cmp = 0;
// Only compare if no KV's have been seen so far.
if (count == 0) {
cmp = CellComparator.compareQualifiers(v, this.columnOffset, 0, this.columnOffset.length);
cmp = CellUtil.compareQualifiers(v, this.columnOffset, 0, this.columnOffset.length);
}
if (cmp < 0) {
return ReturnCode.SEEK_NEXT_USING_HINT;

View File

@ -25,7 +25,6 @@ import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@ -126,7 +125,7 @@ public class ColumnRangeFilter extends FilterBase {
int cmpMin = 1;
if (this.minColumn != null) {
cmpMin = CellComparator.compareQualifiers(kv, this.minColumn, 0, this.minColumn.length);
cmpMin = CellUtil.compareQualifiers(kv, this.minColumn, 0, this.minColumn.length);
}
if (cmpMin < 0) {
@ -141,7 +140,7 @@ public class ColumnRangeFilter extends FilterBase {
return ReturnCode.INCLUDE;
}
int cmpMax = CellComparator.compareQualifiers(kv, this.maxColumn, 0, this.maxColumn.length);
int cmpMax = CellUtil.compareQualifiers(kv, this.maxColumn, 0, this.maxColumn.length);
if (this.maxColumnInclusive && cmpMax <= 0 ||
!this.maxColumnInclusive && cmpMax < 0) {

View File

@ -23,7 +23,7 @@ import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@ -137,7 +137,7 @@ public abstract class CompareFilter extends FilterBase {
if (compareOp == CompareOp.NO_OP) {
return true;
}
int compareResult = CellComparator.compareRow(cell, comparator);
int compareResult = CellUtil.compareRow(cell, comparator);
return compare(compareOp, compareResult);
}
@ -146,7 +146,7 @@ public abstract class CompareFilter extends FilterBase {
if (op == CompareOperator.NO_OP) {
return true;
}
int compareResult = CellComparator.compareRow(cell, comparator);
int compareResult = CellUtil.compareRow(cell, comparator);
return compare(op, compareResult);
}
@ -160,7 +160,7 @@ public abstract class CompareFilter extends FilterBase {
if (compareOp == CompareOp.NO_OP) {
return true;
}
int compareResult = CellComparator.compareFamily(cell, comparator);
int compareResult = CellUtil.compareFamily(cell, comparator);
return compare(compareOp, compareResult);
}
@ -169,7 +169,7 @@ public abstract class CompareFilter extends FilterBase {
if (op == CompareOperator.NO_OP) {
return true;
}
int compareResult = CellComparator.compareFamily(cell, comparator);
int compareResult = CellUtil.compareFamily(cell, comparator);
return compare(op, compareResult);
}
@ -184,7 +184,7 @@ public abstract class CompareFilter extends FilterBase {
if (compareOp == CompareOp.NO_OP) {
return true;
}
int compareResult = CellComparator.compareQualifier(cell, comparator);
int compareResult = CellUtil.compareQualifier(cell, comparator);
return compare(compareOp, compareResult);
}
@ -194,7 +194,7 @@ public abstract class CompareFilter extends FilterBase {
if (op == CompareOperator.NO_OP) {
return true;
}
int compareResult = CellComparator.compareQualifier(cell, comparator);
int compareResult = CellUtil.compareQualifier(cell, comparator);
return compare(op, compareResult);
}
@ -209,7 +209,7 @@ public abstract class CompareFilter extends FilterBase {
if (compareOp == CompareOp.NO_OP) {
return true;
}
int compareResult = CellComparator.compareValue(cell, comparator);
int compareResult = CellUtil.compareValue(cell, comparator);
return compare(compareOp, compareResult);
}
@ -218,7 +218,7 @@ public abstract class CompareFilter extends FilterBase {
if (op == CompareOperator.NO_OP) {
return true;
}
int compareResult = CellComparator.compareValue(cell, comparator);
int compareResult = CellUtil.compareValue(cell, comparator);
return compare(op, compareResult);
}

View File

@ -26,7 +26,7 @@ import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@ -500,7 +500,7 @@ final public class FilterList extends FilterBase {
keyHint = curKeyHint;
continue;
}
if (CellComparator.COMPARATOR.compare(keyHint, curKeyHint) < 0) {
if (CellComparatorImpl.COMPARATOR.compare(keyHint, curKeyHint) < 0) {
keyHint = curKeyHint;
}
}
@ -523,7 +523,7 @@ final public class FilterList extends FilterBase {
keyHint = curKeyHint;
continue;
}
if (CellComparator.COMPARATOR.compare(keyHint, curKeyHint) > 0) {
if (CellComparatorImpl.COMPARATOR.compare(keyHint, curKeyHint) > 0) {
keyHint = curKeyHint;
}
}

View File

@ -24,7 +24,7 @@ import java.util.List;
import java.util.PriorityQueue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@ -235,7 +235,7 @@ public class FuzzyRowFilter extends FilterBase {
boolean lessThan(Cell currentCell, byte[] nextRowKey) {
int compareResult =
CellComparator.COMPARATOR.compareRows(currentCell, nextRowKey, 0, nextRowKey.length);
CellComparatorImpl.COMPARATOR.compareRows(currentCell, nextRowKey, 0, nextRowKey.length);
return (!isReversed() && compareResult < 0) || (isReversed() && compareResult > 0);
}

View File

@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.filter;
import java.util.ArrayList;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
@ -60,7 +60,7 @@ public class InclusiveStopFilter extends FilterBase {
public boolean filterRowKey(Cell firstRowCell) {
// if stopRowKey is <= buffer, then true, filter row.
if (filterAllRemaining()) return true;
int cmp = CellComparator.COMPARATOR.compareRows(firstRowCell, stopRowKey, 0, stopRowKey.length);
int cmp = CellComparatorImpl.COMPARATOR.compareRows(firstRowCell, stopRowKey, 0, stopRowKey.length);
done = reversed ? cmp < 0 : cmp > 0;
return done;
}

View File

@ -23,7 +23,6 @@ import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.yetus.audience.InterfaceAudience;
@ -268,7 +267,7 @@ public class SingleColumnValueFilter extends FilterBase {
}
private boolean filterColumnValue(final Cell cell) {
int compareResult = CellComparator.compareValue(cell, this.comparator);
int compareResult = CellUtil.compareValue(cell, this.comparator);
return CompareFilter.compare(this.op, compareResult);
}

View File

@ -32,7 +32,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.commons.logging.Log;
@ -677,7 +677,7 @@ public class TestClientNoCluster extends Configured implements Tool {
* Comparator for meta row keys.
*/
private static class MetaRowsComparator implements Comparator<byte []> {
private final CellComparator delegate = CellComparator.META_COMPARATOR;
private final CellComparatorImpl delegate = CellComparatorImpl.META_COMPARATOR;
@Override
public int compare(byte[] left, byte[] right) {
return delegate.compareRows(new KeyValue.KeyOnlyKeyValue(left), right, 0, right.length);

View File

@ -29,7 +29,7 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@ -388,7 +388,7 @@ public class TestOperation {
Assert.assertEquals(1984L, c.get(0).getTimestamp());
Assert.assertArrayEquals(VALUE, CellUtil.cloneValue(c.get(0)));
Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimeStamp());
Assert.assertEquals(0, CellComparator.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0))));
Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0))));
p = new Put(ROW);
p.addColumn(FAMILY, ByteBuffer.wrap(QUALIFIER), 2013L, null);
@ -397,7 +397,7 @@ public class TestOperation {
Assert.assertEquals(2013L, c.get(0).getTimestamp());
Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0)));
Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimeStamp());
Assert.assertEquals(0, CellComparator.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0))));
Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0))));
p = new Put(ByteBuffer.wrap(ROW));
p.addColumn(FAMILY, ByteBuffer.wrap(QUALIFIER), 2001L, null);
@ -407,7 +407,7 @@ public class TestOperation {
Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0)));
Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0)));
Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimeStamp());
Assert.assertEquals(0, CellComparator.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0))));
Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0))));
p = new Put(ByteBuffer.wrap(ROW), 1970L);
p.addColumn(FAMILY, ByteBuffer.wrap(QUALIFIER), 2001L, null);
@ -417,7 +417,7 @@ public class TestOperation {
Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0)));
Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0)));
Assert.assertEquals(1970L, p.getTimeStamp());
Assert.assertEquals(0, CellComparator.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0))));
Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0))));
}
@Test

View File

@ -24,7 +24,7 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@ -53,44 +53,44 @@ public class TestComparators {
ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
ByteArrayComparable comparable = new BinaryComparator(r1);
assertEquals(0, CellComparator.compareRow(bbCell, comparable));
assertEquals(0, CellComparator.compareRow(kv, comparable));
assertEquals(0, CellUtil.compareRow(bbCell, comparable));
assertEquals(0, CellUtil.compareRow(kv, comparable));
kv = new KeyValue(r0, f, q1, v1);
buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
assertTrue(CellComparator.compareRow(bbCell, comparable) > 0);
assertTrue(CellComparator.compareRow(kv, comparable) > 0);
assertTrue(CellUtil.compareRow(bbCell, comparable) > 0);
assertTrue(CellUtil.compareRow(kv, comparable) > 0);
kv = new KeyValue(r2, f, q1, v1);
buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
assertTrue(CellComparator.compareRow(bbCell, comparable) < 0);
assertTrue(CellComparator.compareRow(kv, comparable) < 0);
assertTrue(CellUtil.compareRow(bbCell, comparable) < 0);
assertTrue(CellUtil.compareRow(kv, comparable) < 0);
// Qualifier compare
comparable = new BinaryPrefixComparator(Bytes.toBytes("qual"));
assertEquals(0, CellComparator.compareQualifier(bbCell, comparable));
assertEquals(0, CellComparator.compareQualifier(kv, comparable));
assertEquals(0, CellUtil.compareQualifier(bbCell, comparable));
assertEquals(0, CellUtil.compareQualifier(kv, comparable));
kv = new KeyValue(r2, f, q2, v1);
buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
assertEquals(0, CellComparator.compareQualifier(bbCell, comparable));
assertEquals(0, CellComparator.compareQualifier(kv, comparable));
assertEquals(0, CellUtil.compareQualifier(bbCell, comparable));
assertEquals(0, CellUtil.compareQualifier(kv, comparable));
kv = new KeyValue(r2, f, q3, v1);
buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
assertTrue(CellComparator.compareQualifier(bbCell, comparable) < 0);
assertTrue(CellComparator.compareQualifier(kv, comparable) < 0);
assertTrue(CellUtil.compareQualifier(bbCell, comparable) < 0);
assertTrue(CellUtil.compareQualifier(kv, comparable) < 0);
// Value compare
comparable = new LongComparator(l1);
assertEquals(0, CellComparator.compareValue(bbCell, comparable));
assertEquals(0, CellComparator.compareValue(kv, comparable));
assertEquals(0, CellUtil.compareValue(bbCell, comparable));
assertEquals(0, CellUtil.compareValue(kv, comparable));
kv = new KeyValue(r1, f, q1, v2);
buffer = ByteBuffer.wrap(kv.getBuffer());
bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
assertTrue(CellComparator.compareValue(bbCell, comparable) < 0);
assertTrue(CellComparator.compareValue(kv, comparable) < 0);
assertTrue(CellUtil.compareValue(bbCell, comparable) < 0);
assertTrue(CellUtil.compareValue(kv, comparable) < 0);
// Family compare
comparable = new SubstringComparator("cf");
assertEquals(0, CellComparator.compareFamily(bbCell, comparable));
assertEquals(0, CellComparator.compareFamily(kv, comparable));
assertEquals(0, CellUtil.compareFamily(bbCell, comparable));
assertEquals(0, CellUtil.compareFamily(kv, comparable));
}
}

View File

@ -26,7 +26,7 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Append;
@ -261,7 +261,7 @@ public class TestProtobufUtil {
ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength());
CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV);
Cell newOffheapKV = ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell);
assertTrue(CellComparator.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
}
/**

View File

@ -15,630 +15,101 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.Serializable;
import java.util.Comparator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Longs;
/**
* Compare two HBase cells. Do not use this method comparing <code>-ROOT-</code> or
* <code>hbase:meta</code> cells. Cells from these tables need a specialized comparator, one that
* takes account of the special formatting of the row where we have commas to delimit table from
* regionname, from row. See KeyValue for how it has a special comparator to do hbase:meta cells
* and yet another for -ROOT-.
* While using this comparator for {{@link #compareRows(Cell, Cell)} et al, the hbase:meta cells
* format should be taken into consideration, for which the instance of this comparator
* should be used. In all other cases the static APIs in this comparator would be enough
* Comparator for comparing cells and has some specialized methods that allows comparing individual
* cell components like row, family, qualifier and timestamp
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
value="UNKNOWN",
justification="Findbugs doesn't like the way we are negating the result of a compare in below")
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public class CellComparator implements Comparator<Cell>, Serializable {
static final Log LOG = LogFactory.getLog(CellComparator.class);
private static final long serialVersionUID = -8760041766259623329L;
public interface CellComparator extends Comparator<Cell> {
/**
* Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion
* of KeyValue only.
* Lexicographically compares two cells. The key part of the cell is taken for comparison which
* includes row, family, qualifier, timestamp and type
* @param leftCell the left hand side cell
* @param rightCell the right hand side cell
* @return greater than 0 if leftCell is bigger, less than 0 if rightCell is bigger, 0 if both
* cells are equal
*/
public static final CellComparator COMPARATOR = new CellComparator();
/**
* A {@link CellComparator} for <code>hbase:meta</code> catalog table
* {@link KeyValue}s.
*/
public static final CellComparator META_COMPARATOR = new MetaCellComparator();
@Override
public int compare(Cell a, Cell b) {
return compare(a, b, false);
}
int compare(Cell leftCell, Cell rightCell);
/**
* Compares only the key portion of a cell. It does not include the sequence id/mvcc of the
* cell
* @param left
* @param right
* @return an int greater than 0 if left &gt; than right
* lesser than 0 if left &lt; than right
* equal to 0 if left is equal to right
* Lexicographically compares the rows of two cells.
* @param leftCell the left hand side cell
* @param rightCell the right hand side cell
* @return greater than 0 if leftCell is bigger, less than 0 if rightCell is bigger, 0 if both
* cells are equal
*/
public final int compareKeyIgnoresMvcc(Cell left, Cell right) {
return compare(left, right, true);
}
/**
* Used when a cell needs to be compared with a key byte[] such as cases of
* finding the index from the index block, bloom keys from the bloom blocks
* This byte[] is expected to be serialized in the KeyValue serialization format
* If the KeyValue (Cell's) serialization format changes this method cannot be used.
* @param left the cell to be compared
* @param key the serialized key part of a KeyValue
* @param offset the offset in the key byte[]
* @param length the length of the key byte[]
* @return an int greater than 0 if left is greater than right
* lesser than 0 if left is lesser than right
* equal to 0 if left is equal to right
*/
public final int compare(Cell left, byte[] key, int offset, int length) {
// row
short rrowlength = Bytes.toShort(key, offset);
int c = compareRows(left, key, offset + Bytes.SIZEOF_SHORT, rrowlength);
if (c != 0) return c;
// Compare the rest of the two KVs without making any assumptions about
// the common prefix. This function will not compare rows anyway, so we
// don't need to tell it that the common prefix includes the row.
return compareWithoutRow(left, key, offset, length, rrowlength);
}
/**
* Compare cells.
* @param a
* @param b
* @param ignoreSequenceid True if we are to compare the key portion only and ignore
* the sequenceid. Set to false to compare key and consider sequenceid.
* @return 0 if equal, -1 if a &lt; b, and +1 if a &gt; b.
*/
private final int compare(final Cell a, final Cell b, boolean ignoreSequenceid) {
// row
int c = compareRows(a, b);
if (c != 0) return c;
c = compareWithoutRow(a, b);
if(c != 0) return c;
if (!ignoreSequenceid) {
// Negate following comparisons so later edits show up first
// mvccVersion: later sorts first
return Longs.compare(b.getSequenceId(), a.getSequenceId());
} else {
return c;
}
}
/**
* Compares the family and qualifier part of the cell
* @param left the left cell
* @param right the right cell
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/
public final static int compareColumns(final Cell left, final Cell right) {
int diff = compareFamilies(left, right);
if (diff != 0) {
return diff;
}
return compareQualifiers(left, right);
}
private final static int compareColumns(Cell left, byte[] right, int rfoffset, int rflength,
int rqoffset, int rqlength) {
int diff = compareFamilies(left, right, rfoffset, rflength);
if (diff != 0)
return diff;
return compareQualifiers(left, right, rqoffset, rqlength);
}
/**
* Compare the families of left and right cell
* @param left
* @param right
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/
public final static int compareFamilies(Cell left, Cell right) {
if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(),
((ByteBufferCell) right).getFamilyByteBuffer(),
((ByteBufferCell) right).getFamilyPosition(), right.getFamilyLength());
}
if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(),
right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength());
}
if (right instanceof ByteBufferCell) {
// Notice how we flip the order of the compare here. We used to negate the return value but
// see what FindBugs says
// http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO
// It suggest flipping the order to get same effect and 'safer'.
return ByteBufferUtils.compareTo(
left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
((ByteBufferCell)right).getFamilyByteBuffer(),
((ByteBufferCell)right).getFamilyPosition(), right.getFamilyLength());
}
return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength());
}
private final static int compareFamilies(Cell left, byte[] right, int roffset, int rlength) {
if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferCell) left).getFamilyByteBuffer(),
((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(), right,
roffset, rlength);
}
return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
right, roffset, rlength);
}
/**
* Compare the qualifiers part of the left and right cells.
* @param left
* @param right
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/
public final static int compareQualifiers(Cell left, Cell right) {
if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
return ByteBufferUtils
.compareTo(((ByteBufferCell) left).getQualifierByteBuffer(),
((ByteBufferCell) left).getQualifierPosition(),
left.getQualifierLength(), ((ByteBufferCell) right).getQualifierByteBuffer(),
((ByteBufferCell) right).getQualifierPosition(),
right.getQualifierLength());
}
if (left instanceof ByteBufferCell) {
return ByteBufferUtils.compareTo(((ByteBufferCell) left).getQualifierByteBuffer(),
((ByteBufferCell) left).getQualifierPosition(), left.getQualifierLength(),
right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength());
}
if (right instanceof ByteBufferCell) {
// Notice how we flip the order of the compare here. We used to negate the return value but
// see what FindBugs says
// http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO
// It suggest flipping the order to get same effect and 'safer'.
return ByteBufferUtils.compareTo(left.getQualifierArray(),
left.getQualifierOffset(), left.getQualifierLength(),
((ByteBufferCell)right).getQualifierByteBuffer(),
((ByteBufferCell)right).getQualifierPosition(), right.getQualifierLength());
}
return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(),
left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(),
right.getQualifierLength());
}
/**
 * Compare the qualifier of {@code left} against a qualifier held in a plain byte[].
 * @param left the cell whose qualifier is compared
 * @param right the byte[] holding the qualifier bytes
 * @param rOffset offset of the qualifier in {@code right}
 * @param rLength length of the qualifier in {@code right}
 * @return 0 if equal, a positive int if the cell's qualifier sorts after the byte[]
 *         qualifier, a negative int otherwise
 */
public final static int compareQualifiers(Cell left, byte[] right, int rOffset, int rLength) {
  if (left instanceof ByteBufferCell) {
    // ByteBuffer-backed cell: compare out of the buffer without copying.
    return ByteBufferUtils.compareTo(((ByteBufferCell) left).getQualifierByteBuffer(),
        ((ByteBufferCell) left).getQualifierPosition(), left.getQualifierLength(),
        right, rOffset, rLength);
  }
  return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(),
      left.getQualifierLength(), right, rOffset, rLength);
}
/**
 * Compare columnFamily, qualifier, timestamp, and key type (everything
 * except the row). This method is used both in the normal comparator and
 * the "same-prefix" comparator. Note that we are assuming that row portions
 * of both KVs have already been parsed and found identical, and we don't
 * validate that assumption here.
 * @param left the cell supplying the left-hand key
 * @param right a KeyValue-serialized key as byte[] (layout sketched below); assumed
 *          well-formed — not validated here
 * @param roffset offset of the serialized key in {@code right}
 * @param rlength length of the serialized key in {@code right}
 * @param rowlength the length of the (already-compared) row portion, including row
 *          length and row — used to skip straight to the column part of {@code right}
 * @return 0 if equal, a positive int if left sorts after right, a negative int otherwise
 */
private final int compareWithoutRow(Cell left,
    byte[] right, int roffset, int rlength, short rowlength) {
  /***
   * KeyValue Format and commonLength:
   * |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|....
   * ------------------|-------commonLength--------|--------------
   */
  int commonLength = KeyValue.ROW_LENGTH_SIZE + KeyValue.FAMILY_LENGTH_SIZE + rowlength;
  // commonLength + TIMESTAMP_TYPE_SIZE
  int commonLengthWithTSAndType = KeyValue.TIMESTAMP_TYPE_SIZE + commonLength;
  // ColumnFamily + Qualifier length.
  int lcolumnlength = left.getFamilyLength() + left.getQualifierLength();
  int rcolumnlength = rlength - commonLengthWithTSAndType;
  byte ltype = left.getTypeByte();
  // The type byte is the last byte of the serialized key.
  byte rtype = right[roffset + (rlength - 1)];
  // If the column is not specified, the "minimum" key type appears the
  // latest in the sorted order, regardless of the timestamp. This is used
  // for specifying the last key/value in a given row, because there is no
  // "lexicographically last column" (it would be infinitely long). The
  // "maximum" key type does not need this behavior.
  if (lcolumnlength == 0 && ltype == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  if (rcolumnlength == 0 && rtype == Type.Minimum.getCode()) {
    return -1;
  }
  int rfamilyoffset = commonLength + roffset;
  // Column family length.
  int lfamilylength = left.getFamilyLength();
  // The family-length byte immediately precedes the family bytes in the serialization.
  int rfamilylength = right[rfamilyoffset - 1];
  // If left family size is not equal to right family size, we need not
  // compare the qualifiers.
  boolean sameFamilySize = (lfamilylength == rfamilylength);
  if (!sameFamilySize) {
    // comparing column family is enough.
    return compareFamilies(left, right, rfamilyoffset, rfamilylength);
  }
  // Compare family & qualifier together.
  // Families are same. Compare on qualifiers.
  int comparison = compareColumns(left, right, rfamilyoffset, rfamilylength, rfamilyoffset
      + rfamilylength, (rcolumnlength - rfamilylength));
  if (comparison != 0) {
    return comparison;
  }
  // //
  // Next compare timestamps. The 8-byte timestamp sits just before the trailing type byte.
  long rtimestamp = Bytes.toLong(right, roffset + (rlength - KeyValue.TIMESTAMP_TYPE_SIZE));
  int compare = compareTimestamps(left.getTimestamp(), rtimestamp);
  if (compare != 0) {
    return compare;
  }
  // Compare types. Let the delete types sort ahead of puts; i.e. types
  // of higher numbers sort before those of lesser numbers. Maximum (255)
  // appears ahead of everything, and minimum (0) appears after
  // everything.
  return (0xff & rtype) - (0xff & ltype);
}
/**
 * Compares the rows of the left and right cell.
 * For the hbase:meta case this method is overridden such that it can handle hbase:meta cells.
 * The caller should ensure using the appropriate comparator for hbase:meta.
 * @param left the left hand side cell
 * @param right the right hand side cell
 * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
 */
public int compareRows(final Cell left, final Cell right) {
  if (left == right) {
    // A cell trivially equals itself; common at the beginning of a row.
    return 0;
  }
  ByteBufferCell bbLeft = (left instanceof ByteBufferCell) ? (ByteBufferCell) left : null;
  ByteBufferCell bbRight = (right instanceof ByteBufferCell) ? (ByteBufferCell) right : null;
  if (bbLeft != null && bbRight != null) {
    return ByteBufferUtils.compareTo(bbLeft.getRowByteBuffer(), bbLeft.getRowPosition(),
        left.getRowLength(), bbRight.getRowByteBuffer(), bbRight.getRowPosition(),
        right.getRowLength());
  }
  if (bbLeft != null) {
    return ByteBufferUtils.compareTo(bbLeft.getRowByteBuffer(), bbLeft.getRowPosition(),
        left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength());
  }
  if (bbRight != null) {
    // Arguments are flipped rather than negating the result; FindBugs flags negated
    // compareTo results (RV_NEGATING_RESULT_OF_COMPARETO) and flipping is equivalent.
    return ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(),
        left.getRowLength(), bbRight.getRowByteBuffer(), bbRight.getRowPosition(),
        right.getRowLength());
  }
  return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
      right.getRowArray(), right.getRowOffset(), right.getRowLength());
}
// NOTE(review): abstract, interface-style declaration duplicating the concrete
// compareRows(Cell, Cell) above — looks like unresolved residue from splitting
// CellComparator into an interface plus CellComparatorImpl; confirm which of the two
// belongs in this type. A class cannot hold both.
int compareRows(Cell leftCell, Cell rightCell);
/**
 * Compares the row part of the cell with a simple plain byte[] like the
 * stopRow in Scan. This should be used with context where for hbase:meta
 * cells the {@link #META_COMPARATOR} should be used.
 * @param left the cell to be compared
 * @param right the byte[] representing the row to be compared with
 * @param roffset the offset of the row in the byte[]
 * @param rlength the length of the row in the byte[]
 * @return 0 if both the cell's row and the byte[] are equal, 1 if the cell's row is
 *         bigger than the byte[], -1 otherwise
 */
public int compareRows(Cell left, byte[] right, int roffset, int rlength) {
  if (left instanceof ByteBufferCell) {
    // ByteBuffer-backed cell: compare out of the buffer without copying.
    return ByteBufferUtils.compareTo(((ByteBufferCell) left).getRowByteBuffer(),
        ((ByteBufferCell) left).getRowPosition(), left.getRowLength(), right,
        roffset, rlength);
  }
  return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right,
      roffset, rlength);
}
/**
 * Lexicographically compares the two cells excluding the row part: family, qualifier,
 * timestamp and type, in that order.
 * @param left the left hand side cell
 * @param right the right hand side cell
 * @return greater than 0 if left is bigger, less than 0 if right is bigger, 0 if equal
 */
public static int compareWithoutRow(final Cell left, final Cell right) {
  // A cell with an empty column (no family, no qualifier) and the "minimum" type sorts
  // after everything else in its row, regardless of timestamp: it marks the last
  // possible key/value of a row, since there is no "lexicographically last column".
  // The "maximum" type does not need this rule. (Copied from KeyValue — this special
  // case is why a straight memcmp cannot be used here.)
  final int leftColumnLength = left.getFamilyLength() + left.getQualifierLength();
  final int rightColumnLength = right.getFamilyLength() + right.getQualifierLength();
  if (leftColumnLength == 0 && left.getTypeByte() == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  if (rightColumnLength == 0 && right.getTypeByte() == Type.Minimum.getCode()) {
    return -1;
  }
  if (left.getFamilyLength() != right.getFamilyLength()) {
    // Different family lengths imply different families; comparing families suffices.
    return compareFamilies(left, right);
  }
  // Families are the same length: compare cf:qualifier together.
  int diff = compareColumns(left, right);
  if (diff != 0) {
    return diff;
  }
  diff = compareTimestamps(left, right);
  if (diff != 0) {
    return diff;
  }
  // Compare types. Delete types sort ahead of puts: higher type codes sort first,
  // so Maximum (255) appears ahead of everything and Minimum (0) after everything.
  return (0xff & right.getTypeByte()) - (0xff & left.getTypeByte());
}
// NOTE(review): abstract declaration duplicating the concrete
// compareRows(Cell, byte[], int, int) above — unresolved residue from the
// interface/impl split; confirm which variant belongs in this type.
int compareRows(Cell cell, byte[] bytes, int offset, int length);
/**
 * Compares cell's timestamps in DESCENDING order.
 * The below older timestamps sorting ahead of newer timestamps looks
 * wrong but it is intentional. This way, newer timestamps are first
 * found when we iterate over a memstore and newer versions are the
 * first we trip over when reading from a store file.
 * @param left the left hand side cell
 * @param right the right hand side cell
 * @return 1 if left's timestamp &lt; right's timestamp,
 *         -1 if left's timestamp &gt; right's timestamp,
 *         0 if both timestamps are equal
 */
public static int compareTimestamps(final Cell left, final Cell right) {
  return compareTimestamps(left.getTimestamp(), right.getTimestamp());
}
// NOTE(review): abstract declaration duplicating the concrete static
// compareWithoutRow(Cell, Cell) above — unresolved residue from the interface/impl
// split; confirm which variant belongs in this type.
int compareWithoutRow(Cell leftCell, Cell rightCell);
/**
 * Used to compare two cells based on the column hint provided. This is specifically
 * used when we need to optimize the seeks based on the next indexed key. This is an
 * advanced usage API specifically needed for some optimizations.
 * @param nextIndexedCell the next indexed cell
 * @param currentCell the cell to be compared
 * @param foff the family offset of the currentCell
 * @param flen the family length of the currentCell
 * @param colHint the column hint provided - could be null
 * @param coff the offset of the column hint if provided, if not offset of the
 *          currentCell's qualifier
 * @param clen the length of the column hint if provided, if not length of the
 *          currentCell's qualifier
 * @param ts the timestamp to be seeked
 * @param type the type to be seeked
 * @return an int based on the given column hint
 * TODO : To be moved out of here because this is a special API used in scan
 * optimization.
 */
// compare a key against row/fam/qual/ts/type
public final int compareKeyBasedOnColHint(Cell nextIndexedCell, Cell currentCell, int foff,
    int flen, byte[] colHint, int coff, int clen, long ts, byte type) {
  int compare = compareRows(nextIndexedCell, currentCell);
  if (compare != 0) {
    return compare;
  }
  // If the column is not specified, the "minimum" key type appears the
  // latest in the sorted order, regardless of the timestamp. This is used
  // for specifying the last key/value in a given row, because there is no
  // "lexicographically last column" (it would be infinitely long). The
  // "maximum" key type does not need this behavior.
  if (nextIndexedCell.getFamilyLength() + nextIndexedCell.getQualifierLength() == 0
      && nextIndexedCell.getTypeByte() == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  if (flen + clen == 0 && type == Type.Minimum.getCode()) {
    return -1;
  }
  compare = compareFamilies(nextIndexedCell, currentCell);
  if (compare != 0) {
    return compare;
  }
  // Prefer the caller-supplied column hint over currentCell's own qualifier when given.
  if (colHint == null) {
    compare = compareQualifiers(nextIndexedCell, currentCell);
  } else {
    compare = compareQualifiers(nextIndexedCell, colHint, coff, clen);
  }
  if (compare != 0) {
    return compare;
  }
  // Next compare timestamps.
  compare = compareTimestamps(nextIndexedCell.getTimestamp(), ts);
  if (compare != 0) {
    return compare;
  }
  // Compare types. Let the delete types sort ahead of puts; i.e. types
  // of higher numbers sort before those of lesser numbers. Maximum (255)
  // appears ahead of everything, and minimum (0) appears after
  // everything.
  return (0xff & type) - (0xff & nextIndexedCell.getTypeByte());
}
// NOTE(review): abstract declaration of compareFamilies(Cell, Cell) — unresolved
// residue from the interface/impl split; confirm which variant belongs in this type.
int compareFamilies(Cell leftCell, Cell rightCell);
/**
 * Compares timestamps in DESCENDING order.
 * The below older timestamps sorting ahead of newer timestamps looks
 * wrong but it is intentional. This way, newer timestamps are first
 * found when we iterate over a memstore and newer versions are the
 * first we trip over when reading from a store file.
 * @param ltimestamp the left timestamp
 * @param rtimestamp the right timestamp
 * @return 1 if left timestamp &lt; right timestamp,
 *         -1 if left timestamp &gt; right timestamp,
 *         0 if both timestamps are equal
 */
public static int compareTimestamps(final long ltimestamp, final long rtimestamp) {
  if (ltimestamp == rtimestamp) {
    return 0;
  }
  // Descending order: the smaller (older) timestamp sorts AFTER the larger (newer) one.
  return (ltimestamp < rtimestamp) ? 1 : -1;
}
// NOTE(review): abstract declaration duplicating the concrete static
// compareQualifiers(Cell, Cell) above — unresolved residue from the interface/impl
// split; confirm which variant belongs in this type.
int compareQualifiers(Cell leftCell, Cell rightCell);
/**
 * Compare cell's row against given comparator.
 * @param cell the cell whose row is handed to the comparator
 * @param comparator the {@link ByteArrayComparable} to compare against
 * @return result of comparing the cell's row with the comparator
 */
public static int compareRow(Cell cell, ByteArrayComparable comparator) {
  if (cell instanceof ByteBufferCell) {
    // ByteBuffer-backed cell: hand the buffer to the comparator, no copy.
    return comparator.compareTo(((ByteBufferCell) cell).getRowByteBuffer(),
        ((ByteBufferCell) cell).getRowPosition(), cell.getRowLength());
  }
  return comparator.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
}
// NOTE(review): abstract declaration duplicating the concrete static
// compareTimestamps(Cell, Cell) above — unresolved residue from the interface/impl
// split; confirm which variant belongs in this type.
int compareTimestamps(Cell leftCell, Cell rightCell);
/**
 * Compare cell's column family against given comparator.
 * @param cell the cell whose family is handed to the comparator
 * @param comparator the {@link ByteArrayComparable} to compare against
 * @return result of comparing the cell's column family with the comparator
 */
public static int compareFamily(Cell cell, ByteArrayComparable comparator) {
  if (cell instanceof ByteBufferCell) {
    // ByteBuffer-backed cell: hand the buffer to the comparator, no copy.
    return comparator.compareTo(((ByteBufferCell) cell).getFamilyByteBuffer(),
        ((ByteBufferCell) cell).getFamilyPosition(), cell.getFamilyLength());
  }
  return comparator.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(),
      cell.getFamilyLength());
}
/**
 * Compare cell's qualifier against given comparator.
 * @param cell the cell whose qualifier is handed to the comparator
 * @param comparator the {@link ByteArrayComparable} to compare against
 * @return result of comparing the cell's qualifier with the comparator
 */
public static int compareQualifier(Cell cell, ByteArrayComparable comparator) {
  if (!(cell instanceof ByteBufferCell)) {
    return comparator.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(),
        cell.getQualifierLength());
  }
  // ByteBuffer-backed cell: hand the buffer to the comparator, no copy.
  ByteBufferCell bbCell = (ByteBufferCell) cell;
  return comparator.compareTo(bbCell.getQualifierByteBuffer(), bbCell.getQualifierPosition(),
      cell.getQualifierLength());
}
/**
 * Compare cell's value against given comparator.
 * @param cell the cell whose value is handed to the comparator
 * @param comparator the {@link ByteArrayComparable} to compare against
 * @return result of comparing the cell's value with the comparator
 */
public static int compareValue(Cell cell, ByteArrayComparable comparator) {
  if (!(cell instanceof ByteBufferCell)) {
    return comparator.compareTo(cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength());
  }
  // ByteBuffer-backed cell: hand the buffer to the comparator, no copy.
  ByteBufferCell bbCell = (ByteBufferCell) cell;
  return comparator.compareTo(bbCell.getValueByteBuffer(), bbCell.getValuePosition(),
      cell.getValueLength());
}
/**
 * A {@link CellComparator} for <code>hbase:meta</code> catalog table
 * {@link KeyValue}s.
 * Meta row keys contain {@link HConstants#DELIMITER}-separated sections (the code below
 * compares up to the first delimiter, then a middle section, then a trailing "rowid"
 * section) rather than being compared as one opaque byte[].
 */
public static class MetaCellComparator extends CellComparator {

  @Override
  public int compareRows(final Cell left, final Cell right) {
    // Delegate to the delimiter-aware byte[] comparison.
    return compareRows(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
        right.getRowArray(), right.getRowOffset(), right.getRowLength());
  }

  @Override
  public int compareRows(Cell left, byte[] right, int roffset, int rlength) {
    return compareRows(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right,
        roffset, rlength);
  }

  /**
   * Delimiter-aware row comparison: compares the section before the first delimiter,
   * then the middle section, then the trailing rowid. At each stage, a row missing the
   * delimiter sorts before a row that has it (when the preceding bytes compare equal).
   */
  private int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset,
      int rlength) {
    int leftDelimiter = Bytes.searchDelimiterIndex(left, loffset, llength, HConstants.DELIMITER);
    int rightDelimiter = Bytes
        .searchDelimiterIndex(right, roffset, rlength, HConstants.DELIMITER);
    // Compare up to the delimiter
    int lpart = (leftDelimiter < 0 ? llength : leftDelimiter - loffset);
    int rpart = (rightDelimiter < 0 ? rlength : rightDelimiter - roffset);
    int result = Bytes.compareTo(left, loffset, lpart, right, roffset, rpart);
    if (result != 0) {
      return result;
    } else {
      if (leftDelimiter < 0 && rightDelimiter >= 0) {
        return -1;
      } else if (rightDelimiter < 0 && leftDelimiter >= 0) {
        return 1;
      } else if (leftDelimiter < 0 && rightDelimiter < 0) {
        return 0;
      }
    }
    // Compare middle bit of the row.
    // Move past delimiter
    leftDelimiter++;
    rightDelimiter++;
    int leftFarDelimiter = Bytes.searchDelimiterIndexInReverse(left, leftDelimiter, llength
        - (leftDelimiter - loffset), HConstants.DELIMITER);
    int rightFarDelimiter = Bytes.searchDelimiterIndexInReverse(right, rightDelimiter, rlength
        - (rightDelimiter - roffset), HConstants.DELIMITER);
    // Now compare middlesection of row.
    lpart = (leftFarDelimiter < 0 ? llength + loffset : leftFarDelimiter) - leftDelimiter;
    rpart = (rightFarDelimiter < 0 ? rlength + roffset : rightFarDelimiter) - rightDelimiter;
    result = Bytes.compareTo(left, leftDelimiter, lpart, right, rightDelimiter, rpart);
    if (result != 0) {
      return result;
    } else {
      // NOTE(review): leftDelimiter/rightDelimiter were incremented above and any
      // negative case already returned, so these branches look unreachable — possibly
      // leftFarDelimiter/rightFarDelimiter were intended here. Confirm; kept as-is.
      if (leftDelimiter < 0 && rightDelimiter >= 0) {
        return -1;
      } else if (rightDelimiter < 0 && leftDelimiter >= 0) {
        return 1;
      } else if (leftDelimiter < 0 && rightDelimiter < 0) {
        return 0;
      }
    }
    // Compare last part of row, the rowid.
    leftFarDelimiter++;
    rightFarDelimiter++;
    result = Bytes.compareTo(left, leftFarDelimiter, llength - (leftFarDelimiter - loffset),
        right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset));
    return result;
  }
}
// NOTE(review): abstract declaration duplicating the concrete static
// compareTimestamps(long, long) above — unresolved residue from the interface/impl
// split; confirm which variant belongs in this type.
int compareTimestamps(long leftCellts, long rightCellts);
}

View File

@ -0,0 +1,381 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Longs;
/**
 * Compare two HBase cells. Do not use this comparator for <code>-ROOT-</code> or
 * <code>hbase:meta</code> cells. Cells from these tables need a specialized comparator, one that
 * takes account of the special formatting of the row where we have commas to delimit table from
 * regionname, from row. See KeyValue for how it has a special comparator to do hbase:meta cells
 * and yet another for -ROOT-.
 * While using this comparator for {@link #compareRows(Cell, Cell)} et al, the hbase:meta cells
 * format should be taken into consideration, for which the instance of this comparator
 * should be used. In all other cases the static APIs in this comparator would be enough.
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
    value="UNKNOWN",
    justification="Findbugs doesn't like the way we are negating the result of a compare in below")
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class CellComparatorImpl implements CellComparator {
  static final Log LOG = LogFactory.getLog(CellComparatorImpl.class);

  /**
   * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion
   * of KeyValue only.
   */
  public static final CellComparatorImpl COMPARATOR = new CellComparatorImpl();

  /**
   * A {@link CellComparatorImpl} for <code>hbase:meta</code> catalog table
   * {@link KeyValue}s.
   */
  public static final CellComparatorImpl META_COMPARATOR = new MetaCellComparator();

  @Override
  public int compare(Cell a, Cell b) {
    // Full comparison: key portion (row + column + ts + type) AND the sequenceid.
    return compare(a, b, false);
  }

  /**
   * Compare cells.
   * @param a the left hand side cell
   * @param b the right hand side cell
   * @param ignoreSequenceid True if we are to compare the key portion only and ignore
   *          the sequenceid. Set to false to compare key and consider sequenceid.
   * @return 0 if equal, -1 if a &lt; b, and +1 if a &gt; b.
   */
  public final int compare(final Cell a, final Cell b, boolean ignoreSequenceid) {
    // row
    int c = compareRows(a, b);
    if (c != 0) return c;
    c = compareWithoutRow(a, b);
    if(c != 0) return c;
    if (!ignoreSequenceid) {
      // Negate following comparisons so later edits show up first
      // mvccVersion: later sorts first
      return Longs.compare(b.getSequenceId(), a.getSequenceId());
    } else {
      return c;
    }
  }

  /**
   * Compares the family and qualifier part of the cell.
   * @param left the left cell
   * @param right the right cell
   * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
   */
  public final int compareColumns(final Cell left, final Cell right) {
    int diff = compareFamilies(left, right);
    if (diff != 0) {
      return diff;
    }
    // Families are equal: the qualifier decides.
    return compareQualifiers(left, right);
  }

  /**
   * Compare the families of left and right cell.
   * @param left the left cell
   * @param right the right cell
   * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
   */
  @Override
  public final int compareFamilies(Cell left, Cell right) {
    // ByteBuffer-backed cells are compared straight out of their buffers, no copying.
    if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
      return ByteBufferUtils.compareTo(((ByteBufferCell) left).getFamilyByteBuffer(),
          ((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(),
          ((ByteBufferCell) right).getFamilyByteBuffer(),
          ((ByteBufferCell) right).getFamilyPosition(), right.getFamilyLength());
    }
    if (left instanceof ByteBufferCell) {
      return ByteBufferUtils.compareTo(((ByteBufferCell) left).getFamilyByteBuffer(),
          ((ByteBufferCell) left).getFamilyPosition(), left.getFamilyLength(),
          right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength());
    }
    if (right instanceof ByteBufferCell) {
      // Notice how we flip the order of the compare here. We used to negate the return value but
      // see what FindBugs says
      // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO
      // It suggest flipping the order to get same effect and 'safer'.
      return ByteBufferUtils.compareTo(
          left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
          ((ByteBufferCell)right).getFamilyByteBuffer(),
          ((ByteBufferCell)right).getFamilyPosition(), right.getFamilyLength());
    }
    return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
        right.getFamilyArray(), right.getFamilyOffset(), right.getFamilyLength());
  }

  /**
   * Compare the qualifiers part of the left and right cells.
   * @param left the left cell
   * @param right the right cell
   * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
   */
  @Override
  public final int compareQualifiers(Cell left, Cell right) {
    // Same ByteBuffer-vs-array dispatch pattern as compareFamilies above.
    if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
      return ByteBufferUtils
          .compareTo(((ByteBufferCell) left).getQualifierByteBuffer(),
              ((ByteBufferCell) left).getQualifierPosition(),
              left.getQualifierLength(), ((ByteBufferCell) right).getQualifierByteBuffer(),
              ((ByteBufferCell) right).getQualifierPosition(),
              right.getQualifierLength());
    }
    if (left instanceof ByteBufferCell) {
      return ByteBufferUtils.compareTo(((ByteBufferCell) left).getQualifierByteBuffer(),
          ((ByteBufferCell) left).getQualifierPosition(), left.getQualifierLength(),
          right.getQualifierArray(), right.getQualifierOffset(), right.getQualifierLength());
    }
    if (right instanceof ByteBufferCell) {
      // Notice how we flip the order of the compare here. We used to negate the return value but
      // see what FindBugs says
      // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO
      // It suggest flipping the order to get same effect and 'safer'.
      return ByteBufferUtils.compareTo(left.getQualifierArray(),
          left.getQualifierOffset(), left.getQualifierLength(),
          ((ByteBufferCell)right).getQualifierByteBuffer(),
          ((ByteBufferCell)right).getQualifierPosition(), right.getQualifierLength());
    }
    return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(),
        left.getQualifierLength(), right.getQualifierArray(), right.getQualifierOffset(),
        right.getQualifierLength());
  }

  /**
   * Compares the rows of the left and right cell.
   * For the hbase:meta case this method is overridden such that it can handle hbase:meta cells.
   * The caller should ensure using the appropriate comparator for hbase:meta.
   * @param left the left cell
   * @param right the right cell
   * @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
   */
  @Override
  public int compareRows(final Cell left, final Cell right) {
    // left and right can be exactly the same at the beginning of a row
    if (left == right) {
      return 0;
    }
    if (left instanceof ByteBufferCell && right instanceof ByteBufferCell) {
      return ByteBufferUtils.compareTo(((ByteBufferCell) left).getRowByteBuffer(),
          ((ByteBufferCell) left).getRowPosition(), left.getRowLength(),
          ((ByteBufferCell) right).getRowByteBuffer(),
          ((ByteBufferCell) right).getRowPosition(), right.getRowLength());
    }
    if (left instanceof ByteBufferCell) {
      return ByteBufferUtils.compareTo(((ByteBufferCell) left).getRowByteBuffer(),
          ((ByteBufferCell) left).getRowPosition(), left.getRowLength(),
          right.getRowArray(), right.getRowOffset(), right.getRowLength());
    }
    if (right instanceof ByteBufferCell) {
      // Notice how we flip the order of the compare here. We used to negate the return value but
      // see what FindBugs says
      // http://findbugs.sourceforge.net/bugDescriptions.html#RV_NEGATING_RESULT_OF_COMPARETO
      // It suggest flipping the order to get same effect and 'safer'.
      return ByteBufferUtils.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
          ((ByteBufferCell)right).getRowByteBuffer(),
          ((ByteBufferCell)right).getRowPosition(), right.getRowLength());
    }
    return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
        right.getRowArray(), right.getRowOffset(), right.getRowLength());
  }

  /**
   * Compares the row part of the cell with a simple plain byte[] like the
   * stopRow in Scan. This should be used with context where for hbase:meta
   * cells the {@link #META_COMPARATOR} should be used.
   *
   * @param left
   *          the cell to be compared
   * @param right
   *          the kv serialized byte[] to be compared with
   * @param roffset
   *          the offset in the byte[]
   * @param rlength
   *          the length in the byte[]
   * @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger
   *         than byte[], -1 otherwise
   */
  @Override
  public int compareRows(Cell left, byte[] right, int roffset, int rlength) {
    if (left instanceof ByteBufferCell) {
      return ByteBufferUtils.compareTo(((ByteBufferCell) left).getRowByteBuffer(),
          ((ByteBufferCell) left).getRowPosition(), left.getRowLength(), right,
          roffset, rlength);
    }
    return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right,
        roffset, rlength);
  }

  /**
   * Lexicographically compares everything except the row: family, qualifier, timestamp
   * and type, in that order.
   * @param left the left cell
   * @param right the right cell
   * @return greater than 0 if left is bigger, less than 0 if right is bigger, 0 if equal
   */
  @Override
  public final int compareWithoutRow(final Cell left, final Cell right) {
    // If the column is not specified, the "minimum" key type appears the
    // latest in the sorted order, regardless of the timestamp. This is used
    // for specifying the last key/value in a given row, because there is no
    // "lexicographically last column" (it would be infinitely long). The
    // "maximum" key type does not need this behavior.
    // Copied from KeyValue. This is bad in that we can't do memcmp w/ special rules like this.
    int lFamLength = left.getFamilyLength();
    int rFamLength = right.getFamilyLength();
    int lQualLength = left.getQualifierLength();
    int rQualLength = right.getQualifierLength();
    if (lFamLength + lQualLength == 0
        && left.getTypeByte() == Type.Minimum.getCode()) {
      // left is "bigger", i.e. it appears later in the sorted order
      return 1;
    }
    if (rFamLength + rQualLength == 0
        && right.getTypeByte() == Type.Minimum.getCode()) {
      return -1;
    }
    if (lFamLength != rFamLength) {
      // comparing column family is enough.
      return compareFamilies(left, right);
    }
    // Compare cf:qualifier
    int diff = compareColumns(left, right);
    if (diff != 0) return diff;
    diff = compareTimestamps(left, right);
    if (diff != 0) return diff;
    // Compare types. Let the delete types sort ahead of puts; i.e. types
    // of higher numbers sort before those of lesser numbers. Maximum (255)
    // appears ahead of everything, and minimum (0) appears after
    // everything.
    return (0xff & right.getTypeByte()) - (0xff & left.getTypeByte());
  }

  /**
   * Compares cell's timestamps in DESCENDING order.
   * The below older timestamps sorting ahead of newer timestamps looks
   * wrong but it is intentional. This way, newer timestamps are first
   * found when we iterate over a memstore and newer versions are the
   * first we trip over when reading from a store file.
   * @return 1 if left's timestamp &lt; right's timestamp
   *         -1 if left's timestamp &gt; right's timestamp
   *         0 if both timestamps are equal
   */
  @Override
  public int compareTimestamps(final Cell left, final Cell right) {
    return compareTimestamps(left.getTimestamp(), right.getTimestamp());
  }

  /**
   * Compares timestamps in DESCENDING order.
   * The below older timestamps sorting ahead of newer timestamps looks
   * wrong but it is intentional. This way, newer timestamps are first
   * found when we iterate over a memstore and newer versions are the
   * first we trip over when reading from a store file.
   * @return 1 if left timestamp &lt; right timestamp
   *         -1 if left timestamp &gt; right timestamp
   *         0 if both timestamps are equal
   */
  @Override
  public int compareTimestamps(final long ltimestamp, final long rtimestamp) {
    if (ltimestamp < rtimestamp) {
      return 1;
    } else if (ltimestamp > rtimestamp) {
      return -1;
    }
    return 0;
  }

  /**
   * A {@link CellComparatorImpl} for <code>hbase:meta</code> catalog table
   * {@link KeyValue}s.
   * Meta row keys contain {@link HConstants#DELIMITER}-separated sections; rows are
   * compared section by section rather than as one opaque byte[].
   */
  public static class MetaCellComparator extends CellComparatorImpl {

    @Override
    public int compareRows(final Cell left, final Cell right) {
      // Delegate to the delimiter-aware byte[] comparison.
      return compareRows(left.getRowArray(), left.getRowOffset(), left.getRowLength(),
          right.getRowArray(), right.getRowOffset(), right.getRowLength());
    }

    @Override
    public int compareRows(Cell left, byte[] right, int roffset, int rlength) {
      return compareRows(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right,
          roffset, rlength);
    }

    /**
     * Delimiter-aware row comparison: compares the section before the first delimiter,
     * then the middle section, then the trailing rowid. At each stage a row missing the
     * delimiter sorts before one that has it (when the preceding bytes compare equal).
     */
    private int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset,
        int rlength) {
      int leftDelimiter = Bytes.searchDelimiterIndex(left, loffset, llength, HConstants.DELIMITER);
      int rightDelimiter = Bytes
          .searchDelimiterIndex(right, roffset, rlength, HConstants.DELIMITER);
      // Compare up to the delimiter
      int lpart = (leftDelimiter < 0 ? llength : leftDelimiter - loffset);
      int rpart = (rightDelimiter < 0 ? rlength : rightDelimiter - roffset);
      int result = Bytes.compareTo(left, loffset, lpart, right, roffset, rpart);
      if (result != 0) {
        return result;
      } else {
        if (leftDelimiter < 0 && rightDelimiter >= 0) {
          return -1;
        } else if (rightDelimiter < 0 && leftDelimiter >= 0) {
          return 1;
        } else if (leftDelimiter < 0 && rightDelimiter < 0) {
          return 0;
        }
      }
      // Compare middle bit of the row.
      // Move past delimiter
      leftDelimiter++;
      rightDelimiter++;
      int leftFarDelimiter = Bytes.searchDelimiterIndexInReverse(left, leftDelimiter, llength
          - (leftDelimiter - loffset), HConstants.DELIMITER);
      int rightFarDelimiter = Bytes.searchDelimiterIndexInReverse(right, rightDelimiter, rlength
          - (rightDelimiter - roffset), HConstants.DELIMITER);
      // Now compare middlesection of row.
      lpart = (leftFarDelimiter < 0 ? llength + loffset : leftFarDelimiter) - leftDelimiter;
      rpart = (rightFarDelimiter < 0 ? rlength + roffset : rightFarDelimiter) - rightDelimiter;
      result = Bytes.compareTo(left, leftDelimiter, lpart, right, rightDelimiter, rpart);
      if (result != 0) {
        return result;
      } else {
        // NOTE(review): leftDelimiter/rightDelimiter were incremented above and any
        // negative case already returned, so these branches look unreachable — possibly
        // leftFarDelimiter/rightFarDelimiter were intended. Confirm; kept as-is.
        if (leftDelimiter < 0 && rightDelimiter >= 0) {
          return -1;
        } else if (rightDelimiter < 0 && leftDelimiter >= 0) {
          return 1;
        } else if (leftDelimiter < 0 && rightDelimiter < 0) {
          return 0;
        }
      }
      // Compare last part of row, the rowid.
      leftFarDelimiter++;
      rightFarDelimiter++;
      result = Bytes.compareTo(left, leftFarDelimiter, llength - (leftFarDelimiter - loffset),
          right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset));
      return result;
    }
  }
}

View File

@ -38,8 +38,12 @@ import java.util.Map.Entry;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceAudience.Private;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TagCompressionContext;
import org.apache.hadoop.hbase.io.util.Dictionary;
@ -2280,7 +2284,7 @@ public final class CellUtil {
}
public static boolean matchingTimestamp(Cell a, Cell b) {
return CellComparator.compareTimestamps(a.getTimestamp(), b.getTimestamp()) == 0;
return CellComparatorImpl.COMPARATOR.compareTimestamps(a.getTimestamp(), b.getTimestamp()) == 0;
}
public static boolean matchingType(Cell a, Cell b) {
@ -2638,6 +2642,306 @@ public final class CellUtil {
}
}
/**
 * Compares only the key portion of a cell. It does not include the sequence id/mvcc of the cell
 * @param comparator the comparator doing the key comparison; must be a
 *          {@link CellComparatorImpl} (see note below)
 * @param left the left cell
 * @param right the right cell
 * @return an int greater than 0 if left &gt; than right lesser than 0 if left &lt; than right
 *         equal to 0 if left is equal to right
 */
public static final int compareKeyIgnoresMvcc(CellComparator comparator, Cell left, Cell right) {
  // NOTE(review): unchecked downcast -- any CellComparator implementation other than
  // CellComparatorImpl will trigger a ClassCastException here.
  return ((CellComparatorImpl) comparator).compare(left, right, true);
}
/**
 * Used to compare two cells based on the column hint provided. This is specifically
 * used when we need to optimize the seeks based on the next indexed key. This is an
 * advanced usage API specifically needed for some optimizations.
 * @param comparator the cell comparator used for row/family/qualifier/timestamp comparison
 * @param nextIndexedCell the next indexed cell
 * @param currentCell the cell to be compared
 * @param foff the family offset of the currentCell
 * @param flen the family length of the currentCell
 * @param colHint the column hint provided - could be null
 * @param coff the offset of the column hint if provided, if not offset of the currentCell's
 *          qualifier
 * @param clen the length of the column hint if provided, if not length of the currentCell's
 *          qualifier
 * @param ts the timestamp to be seeked
 * @param type the type to be seeked
 * @return an int based on the given column hint
 * TODO : To be moved out of here because this is a special API used in scan
 *  optimization.
 */
// compare a key against row/fam/qual/ts/type
@InterfaceAudience.Private
public static final int compareKeyBasedOnColHint(CellComparator comparator, Cell nextIndexedCell,
    Cell currentCell, int foff, int flen, byte[] colHint, int coff, int clen, long ts,
    byte type) {
  int compare = comparator.compareRows(nextIndexedCell, currentCell);
  if (compare != 0) {
    // Rows already differ; the hint/timestamp/type never come into play.
    return compare;
  }
  // If the column is not specified, the "minimum" key type appears the
  // latest in the sorted order, regardless of the timestamp. This is used
  // for specifying the last key/value in a given row, because there is no
  // "lexicographically last column" (it would be infinitely long). The
  // "maximum" key type does not need this behavior.
  if (nextIndexedCell.getFamilyLength() + nextIndexedCell.getQualifierLength() == 0
      && nextIndexedCell.getTypeByte() == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  // Same check for the sought key: empty column + Minimum type sorts last in the row.
  if (flen + clen == 0 && type == Type.Minimum.getCode()) {
    return -1;
  }
  compare = comparator.compareFamilies(nextIndexedCell, currentCell);
  if (compare != 0) {
    return compare;
  }
  if (colHint == null) {
    // No hint: compare the qualifiers of the two cells directly.
    compare = comparator.compareQualifiers(nextIndexedCell, currentCell);
  } else {
    // Hint present: compare the indexed cell's qualifier against the hint bytes
    // instead of currentCell's qualifier.
    compare = compareQualifiers(nextIndexedCell, colHint, coff, clen);
  }
  if (compare != 0) {
    return compare;
  }
  // Next compare timestamps.
  compare = comparator.compareTimestamps(nextIndexedCell.getTimestamp(), ts);
  if (compare != 0) {
    return compare;
  }
  // Compare types. Let the delete types sort ahead of puts; i.e. types
  // of higher numbers sort before those of lesser numbers. Maximum (255)
  // appears ahead of everything, and minimum (0) appears after
  // everything.
  return (0xff & type) - (0xff & nextIndexedCell.getTypeByte());
}
/**
 * Compares the cell's qualifier with the given byte[].
 * @param left the cell whose qualifier is compared
 * @param right the byte[] holding the qualifier to compare against
 * @param rOffset the offset of the qualifier within {@code right}
 * @param rLength the length of the qualifier within {@code right}
 * @return greater than 0 if left cell's qualifier is bigger than byte[], lesser than 0 if left
 *         cell's qualifier is lesser than byte[] and 0 otherwise
 */
public final static int compareQualifiers(Cell left, byte[] right, int rOffset, int rLength) {
  // Off-heap cells expose their qualifier as a ByteBuffer; compare without copying.
  if (left instanceof ByteBufferCell) {
    ByteBufferCell bbLeft = (ByteBufferCell) left;
    return ByteBufferUtils.compareTo(bbLeft.getQualifierByteBuffer(),
        bbLeft.getQualifierPosition(), left.getQualifierLength(), right, rOffset, rLength);
  }
  // On-heap cells back their qualifier with a byte[].
  return Bytes.compareTo(left.getQualifierArray(), left.getQualifierOffset(),
      left.getQualifierLength(), right, rOffset, rLength);
}
/**
 * Compare cell's row against given comparator.
 * @param cell the cell whose row is handed to the comparator
 * @param comparator the comparator to apply
 * @return result comparing cell's row
 */
@InterfaceAudience.Private
public static int compareRow(Cell cell, ByteArrayComparable comparator) {
  // Off-heap cells hand their row over as a ByteBuffer to avoid a copy.
  if (cell instanceof ByteBufferCell) {
    ByteBufferCell bbCell = (ByteBufferCell) cell;
    return comparator.compareTo(bbCell.getRowByteBuffer(), bbCell.getRowPosition(),
        cell.getRowLength());
  }
  return comparator.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
}
/**
 * Compare cell's column family against given comparator.
 * @param cell the cell whose family is handed to the comparator
 * @param comparator the comparator to apply
 * @return result comparing cell's column family
 */
@InterfaceAudience.Private
public static int compareFamily(Cell cell, ByteArrayComparable comparator) {
  // Off-heap cells hand their family over as a ByteBuffer to avoid a copy.
  if (cell instanceof ByteBufferCell) {
    ByteBufferCell bbCell = (ByteBufferCell) cell;
    return comparator.compareTo(bbCell.getFamilyByteBuffer(), bbCell.getFamilyPosition(),
        cell.getFamilyLength());
  }
  return comparator.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(),
      cell.getFamilyLength());
}
/**
 * Compare cell's qualifier against given comparator.
 * @param cell the cell whose qualifier is handed to the comparator
 * @param comparator the comparator to apply
 * @return result comparing cell's qualifier
 */
@InterfaceAudience.Private
public static int compareQualifier(Cell cell, ByteArrayComparable comparator) {
  // Off-heap cells hand their qualifier over as a ByteBuffer to avoid a copy.
  if (cell instanceof ByteBufferCell) {
    ByteBufferCell bbCell = (ByteBufferCell) cell;
    return comparator.compareTo(bbCell.getQualifierByteBuffer(), bbCell.getQualifierPosition(),
        cell.getQualifierLength());
  }
  return comparator.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(),
      cell.getQualifierLength());
}
/**
 * Compare cell's value against given comparator.
 * @param cell the cell whose value is handed to the comparator
 * @param comparator the comparator to apply
 * @return result comparing cell's value
 */
@InterfaceAudience.Private
public static int compareValue(Cell cell, ByteArrayComparable comparator) {
  // Off-heap cells hand their value over as a ByteBuffer to avoid a copy.
  if (cell instanceof ByteBufferCell) {
    ByteBufferCell bbCell = (ByteBufferCell) cell;
    return comparator.compareTo(bbCell.getValueByteBuffer(), bbCell.getValuePosition(),
        cell.getValueLength());
  }
  return comparator.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
}
/**
 * Used when a cell needs to be compared with a key byte[] such as cases of
 * finding the index from the index block, bloom keys from the bloom blocks
 * This byte[] is expected to be serialized in the KeyValue serialization format
 * If the KeyValue (Cell's) serialization format changes this method cannot be used.
 * @param comparator the cell comparator
 * @param left the cell to be compared
 * @param key the serialized key part of a KeyValue
 * @param offset the offset in the key byte[]
 * @param length the length of the key byte[]
 * @return an int greater than 0 if left is greater than right
 *         lesser than 0 if left is lesser than right
 *         equal to 0 if left is equal to right
 */
@VisibleForTesting
public static final int compare(CellComparator comparator, Cell left, byte[] key, int offset,
    int length) {
  // The serialized key opens with a 2-byte row length; compare the row portion first.
  short rowLength = Bytes.toShort(key, offset);
  int diff = comparator.compareRows(left, key, offset + Bytes.SIZEOF_SHORT, rowLength);
  if (diff != 0) {
    return diff;
  }
  // Rows matched. Compare the remainder (family/qualifier/timestamp/type) without
  // making any assumptions about a common prefix; compareWithoutRow skips the row
  // anyway, so it need not be told the prefix includes it.
  return compareWithoutRow(comparator, left, key, offset, length, rowLength);
}
/**
 * Compare columnFamily, qualifier, timestamp, and key type (everything
 * except the row). This method is used both in the normal comparator and
 * the "same-prefix" comparator. Note that we are assuming that row portions
 * of both KVs have already been parsed and found identical, and we don't
 * validate that assumption here.
 * @param comparator the cell comparator used for the timestamp comparison
 * @param left the cell whose family/qualifier/timestamp/type is compared
 * @param right serialized KeyValue key bytes to compare against
 * @param roffset offset of the key within {@code right}
 * @param rlength length of the key within {@code right}
 * @param rowlength length of the (already-matched) row portion of the serialized key
 */
private static final int compareWithoutRow(CellComparator comparator, Cell left,
    byte[] right, int roffset, int rlength, short rowlength) {
  /***
   * KeyValue Format and commonLength:
   * |_keyLen_|_valLen_|_rowLen_|_rowKey_|_famiLen_|_fami_|_Quali_|....
   * ------------------|-------commonLength--------|--------------
   */
  int commonLength = KeyValue.ROW_LENGTH_SIZE + KeyValue.FAMILY_LENGTH_SIZE + rowlength;
  // commonLength + TIMESTAMP_TYPE_SIZE
  int commonLengthWithTSAndType = KeyValue.TIMESTAMP_TYPE_SIZE + commonLength;
  // ColumnFamily + Qualifier length.
  int lcolumnlength = left.getFamilyLength() + left.getQualifierLength();
  int rcolumnlength = rlength - commonLengthWithTSAndType;
  // Type byte is the last byte of the serialized key.
  byte ltype = left.getTypeByte();
  byte rtype = right[roffset + (rlength - 1)];
  // If the column is not specified, the "minimum" key type appears the
  // latest in the sorted order, regardless of the timestamp. This is used
  // for specifying the last key/value in a given row, because there is no
  // "lexicographically last column" (it would be infinitely long). The
  // "maximum" key type does not need this behavior.
  if (lcolumnlength == 0 && ltype == Type.Minimum.getCode()) {
    // left is "bigger", i.e. it appears later in the sorted order
    return 1;
  }
  if (rcolumnlength == 0 && rtype == Type.Minimum.getCode()) {
    return -1;
  }
  int rfamilyoffset = commonLength + roffset;
  // Column family length.
  int lfamilylength = left.getFamilyLength();
  // Family length is the single byte immediately preceding the family bytes.
  int rfamilylength = right[rfamilyoffset - 1];
  // If left family size is not equal to right family size, we need not
  // compare the qualifiers.
  boolean sameFamilySize = (lfamilylength == rfamilylength);
  if (!sameFamilySize) {
    // comparing column family is enough.
    return compareFamilies(left, right, rfamilyoffset, rfamilylength);
  }
  // Compare family & qualifier together.
  // Families are same. Compare on qualifiers.
  int comparison = compareColumns(left, right, rfamilyoffset, rfamilylength,
      rfamilyoffset + rfamilylength, (rcolumnlength - rfamilylength));
  if (comparison != 0) {
    return comparison;
  }
  // //
  // Next compare timestamps.
  long rtimestamp = Bytes.toLong(right, roffset + (rlength - KeyValue.TIMESTAMP_TYPE_SIZE));
  int compare = comparator.compareTimestamps(left.getTimestamp(), rtimestamp);
  if (compare != 0) {
    return compare;
  }
  // Compare types. Let the delete types sort ahead of puts; i.e. types
  // of higher numbers sort before those of lesser numbers. Maximum (255)
  // appears ahead of everything, and minimum (0) appears after
  // everything.
  return (0xff & rtype) - (0xff & ltype);
}
/**
 * Compares the cell's family with the given byte[].
 * @param left the cell whose family is compared
 * @param right the byte[] holding the family to compare against
 * @param roffset the offset of the family within {@code right}
 * @param rlength the length of the family within {@code right}
 * @return greater than 0 if left cell's family is bigger than byte[], lesser than 0 if left
 *         cell's family is lesser than byte[] and 0 otherwise
 */
public final static int compareFamilies(Cell left, byte[] right, int roffset, int rlength) {
  // Off-heap cells expose their family as a ByteBuffer; compare without copying.
  if (left instanceof ByteBufferCell) {
    ByteBufferCell bbLeft = (ByteBufferCell) left;
    return ByteBufferUtils.compareTo(bbLeft.getFamilyByteBuffer(), bbLeft.getFamilyPosition(),
        left.getFamilyLength(), right, roffset, rlength);
  }
  // On-heap cells back their family with a byte[].
  return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(),
      right, roffset, rlength);
}
/**
 * Compares the cell's column (family and qualifier) with the given byte[].
 * @param left the cell whose column is compared
 * @param right the byte[] holding the column to compare against
 * @param rfoffset the offset of the family within {@code right}
 * @param rflength the length of the family
 * @param rqoffset the offset of the qualifier within {@code right}
 * @param rqlength the length of the qualifier
 * @return greater than 0 if left cell's column is bigger than byte[], lesser than 0 if left
 *         cell's column is lesser than byte[] and 0 otherwise
 */
public final static int compareColumns(Cell left, byte[] right, int rfoffset, int rflength,
    int rqoffset, int rqlength) {
  // Family is the major ordering key; only tie-break on the qualifier.
  int famDiff = compareFamilies(left, right, rfoffset, rflength);
  if (famDiff != 0) {
    return famDiff;
  }
  return compareQualifiers(left, right, rqoffset, rqlength);
}
@InterfaceAudience.Private
/**
* These cells are used in reseeks/seeks to improve the read performance.

View File

@ -28,7 +28,6 @@ import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -97,14 +96,14 @@ public class KeyValue implements ExtendedCell {
/**
* Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion
* of KeyValue only.
* @deprecated Use {@link CellComparator#COMPARATOR} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
* @deprecated Use {@link CellComparatorImpl#COMPARATOR} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
*/
@Deprecated
public static final KVComparator COMPARATOR = new KVComparator();
/**
* A {@link KVComparator} for <code>hbase:meta</code> catalog table
* {@link KeyValue}s.
* @deprecated Use {@link CellComparator#META_COMPARATOR} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
* @deprecated Use {@link CellComparatorImpl#META_COMPARATOR} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
*/
@Deprecated
public static final KVComparator META_COMPARATOR = new MetaComparator();
@ -1608,7 +1607,7 @@ public class KeyValue implements ExtendedCell {
/**
* A {@link KVComparator} for <code>hbase:meta</code> catalog table
* {@link KeyValue}s.
* @deprecated : {@link CellComparator#META_COMPARATOR} to be used. Deprecated for hbase 2.0, remove for hbase 3.0.
* @deprecated : {@link CellComparatorImpl#META_COMPARATOR} to be used. Deprecated for hbase 2.0, remove for hbase 3.0.
*/
@Deprecated
public static class MetaComparator extends KVComparator {
@ -1618,7 +1617,7 @@ public class KeyValue implements ExtendedCell {
*/
@Override
public int compare(final Cell left, final Cell right) {
return CellComparator.META_COMPARATOR.compareKeyIgnoresMvcc(left, right);
return CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.META_COMPARATOR, left, right);
}
@Override
@ -1723,7 +1722,7 @@ public class KeyValue implements ExtendedCell {
* Compare KeyValues. When we compare KeyValues, we only compare the Key
* portion. This means two KeyValues with same Key but different Values are
* considered the same as far as this Comparator is concerned.
* @deprecated : Use {@link CellComparator}. Deprecated for hbase 2.0, remove for hbase 3.0.
* @deprecated : Use {@link CellComparatorImpl}. Deprecated for hbase 2.0, remove for hbase 3.0.
*/
@Deprecated
public static class KVComparator implements RawComparator<Cell>, SamePrefixComparator<byte[]> {
@ -1751,7 +1750,7 @@ public class KeyValue implements ExtendedCell {
* @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
*/
protected int compareRowKey(final Cell left, final Cell right) {
return CellComparator.COMPARATOR.compareRows(left, right);
return CellComparatorImpl.COMPARATOR.compareRows(left, right);
}
/**
@ -1840,7 +1839,7 @@ public class KeyValue implements ExtendedCell {
}
public int compareOnlyKeyPortion(Cell left, Cell right) {
return CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, right);
return CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, right);
}
/**
@ -1849,12 +1848,12 @@ public class KeyValue implements ExtendedCell {
*/
@Override
public int compare(final Cell left, final Cell right) {
int compare = CellComparator.COMPARATOR.compare(left, right);
int compare = CellComparatorImpl.COMPARATOR.compare(left, right);
return compare;
}
public int compareTimestamps(final Cell left, final Cell right) {
return CellComparator.compareTimestamps(left, right);
return CellComparatorImpl.COMPARATOR.compareTimestamps(left, right);
}
/**
@ -1884,7 +1883,7 @@ public class KeyValue implements ExtendedCell {
int compareColumns(final Cell left, final short lrowlength, final Cell right,
final short rrowlength) {
return CellComparator.compareColumns(left, right);
return CellComparatorImpl.COMPARATOR.compareColumns(left, right);
}
protected int compareColumns(

View File

@ -25,6 +25,7 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HConstants;
@ -748,7 +749,7 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
@Override
public int compareKey(CellComparator comparator, Cell key) {
keyOnlyKV.setKey(current.keyBuffer, 0, current.keyLength);
return comparator.compareKeyIgnoresMvcc(key, keyOnlyKV);
return CellUtil.compareKeyIgnoresMvcc(comparator, key, keyOnlyKV);
}
@Override
@ -880,7 +881,7 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
qualCommonPrefix);
comp = compareCommonQualifierPrefix(seekCell, keyOnlyKV, qualCommonPrefix);
if (comp == 0) {
comp = CellComparator.compareTimestamps(seekCell, keyOnlyKV);
comp = CellComparatorImpl.COMPARATOR.compareTimestamps(seekCell, keyOnlyKV);
if (comp == 0) {
// Compare types. Let the delete types sort ahead of puts;
// i.e. types

View File

@ -100,8 +100,7 @@ public interface DataBlockEncoder {
* @param decodingCtx
* @return A newly created seeker.
*/
EncodedSeeker createSeeker(CellComparator comparator,
HFileBlockDecodingContext decodingCtx);
EncodedSeeker createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx);
/**
* Creates a encoder specific encoding context

View File

@ -25,6 +25,7 @@ import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.yetus.audience.InterfaceAudience;
@ -107,7 +108,7 @@ public class RowIndexCodecV1 extends AbstractDataBlockEncoder {
dup.limit(sourceAsBuffer.position() + onDiskSize);
return dup.slice();
} else {
RowIndexSeekerV1 seeker = new RowIndexSeekerV1(CellComparator.COMPARATOR,
RowIndexSeekerV1 seeker = new RowIndexSeekerV1(CellComparatorImpl.COMPARATOR,
decodingCtx);
seeker.setCurrentBuffer(new SingleByteBuff(sourceAsBuffer));
List<Cell> kvs = new ArrayList<>();

View File

@ -17,6 +17,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@ -57,7 +58,7 @@ public class RowIndexEncoderV1 {
throw new IOException("Key cannot be null or empty");
}
if (lastCell != null) {
int keyComp = CellComparator.COMPARATOR.compareRows(lastCell, cell);
int keyComp = CellComparatorImpl.COMPARATOR.compareRows(lastCell, cell);
if (keyComp > 0) {
throw new IOException("Added a key not lexically larger than"
+ " previous. Current cell = " + cell + ", lastCell = " + lastCell);

View File

@ -190,7 +190,7 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
}
do {
int comp;
comp = comparator.compareKeyIgnoresMvcc(seekCell, current.currentKey);
comp = CellUtil.compareKeyIgnoresMvcc(comparator, seekCell, current.currentKey);
if (comp == 0) { // exact match
if (seekBefore) {
if (!previous.isValid()) {
@ -244,7 +244,7 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
@Override
public int compareKey(CellComparator comparator, Cell key) {
return comparator.compareKeyIgnoresMvcc(key, current.currentKey);
return CellUtil.compareKeyIgnoresMvcc(comparator, key, current.currentKey);
}
protected void decodeFirst() {

View File

@ -31,7 +31,7 @@ import org.junit.experimental.categories.Category;
@Category({MiscTests.class, SmallTests.class})
public class TestCellComparator {
private CellComparator comparator = CellComparator.COMPARATOR;
private CellComparatorImpl comparator = CellComparatorImpl.COMPARATOR;
byte[] row1 = Bytes.toBytes("row1");
byte[] row2 = Bytes.toBytes("row2");
byte[] row_1_0 = Bytes.toBytes("row10");
@ -53,7 +53,7 @@ public class TestCellComparator {
kv1 = new KeyValue(row1, fam2, qual1, val);
kv2 = new KeyValue(row1, fam1, qual1, val);
assertTrue((CellComparator.compareFamilies(kv1, kv2) > 0));
assertTrue((CellComparatorImpl.COMPARATOR.compareFamilies(kv1, kv2) > 0));
kv1 = new KeyValue(row1, fam1, qual1, 1l, val);
kv2 = new KeyValue(row1, fam1, qual1, 2l, val);
@ -72,23 +72,23 @@ public class TestCellComparator {
public void testCompareCellWithKey() throws Exception {
KeyValue kv1 = new KeyValue(row1, fam1, qual1, val);
KeyValue kv2 = new KeyValue(row2, fam1, qual1, val);
assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) < 0);
assertTrue((CellUtil.compare(comparator, kv1, kv2.getKey(), 0, kv2.getKey().length)) < 0);
kv1 = new KeyValue(row1, fam2, qual1, val);
kv2 = new KeyValue(row1, fam1, qual1, val);
assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
assertTrue((CellUtil.compare(comparator, kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
kv1 = new KeyValue(row1, fam1, qual1, 1l, val);
kv2 = new KeyValue(row1, fam1, qual1, 2l, val);
assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
assertTrue((CellUtil.compare(comparator, kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Maximum);
assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
assertTrue((CellUtil.compare(comparator, kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Put);
assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) == 0);
assertTrue((CellUtil.compare(comparator, kv1, kv2.getKey(), 0, kv2.getKey().length)) == 0);
}
@Test
@ -105,16 +105,16 @@ public class TestCellComparator {
kv = new KeyValue(r2, f1, q1, v);
buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell2 = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
assertEquals(0, CellComparator.compareColumns(bbCell1, bbCell2));
assertEquals(0, CellComparator.compareColumns(bbCell1, kv));
assertEquals(0, CellComparatorImpl.COMPARATOR.compareColumns(bbCell1, bbCell2));
assertEquals(0, CellComparatorImpl.COMPARATOR.compareColumns(bbCell1, kv));
kv = new KeyValue(r2, f1, q2, v);
buffer = ByteBuffer.wrap(kv.getBuffer());
Cell bbCell3 = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
assertEquals(0, CellComparator.compareFamilies(bbCell2, bbCell3));
assertTrue(CellComparator.compareQualifiers(bbCell2, bbCell3) < 0);
assertTrue(CellComparator.compareColumns(bbCell2, bbCell3) < 0);
assertEquals(0, CellComparatorImpl.COMPARATOR.compareFamilies(bbCell2, bbCell3));
assertTrue(CellComparatorImpl.COMPARATOR.compareQualifiers(bbCell2, bbCell3) < 0);
assertTrue(CellComparatorImpl.COMPARATOR.compareColumns(bbCell2, bbCell3) < 0);
assertEquals(0, CellComparator.COMPARATOR.compareRows(bbCell2, bbCell3));
assertTrue(CellComparator.COMPARATOR.compareRows(bbCell1, bbCell2) < 0);
assertEquals(0, CellComparatorImpl.COMPARATOR.compareRows(bbCell2, bbCell3));
assertTrue(CellComparatorImpl.COMPARATOR.compareRows(bbCell1, bbCell2) < 0);
}
}

View File

@ -108,24 +108,24 @@ public class TestKeyValue extends TestCase {
final byte [] qf = Bytes.toBytes("umn");
KeyValue aaa = new KeyValue(a, fam, qf, a);
KeyValue bbb = new KeyValue(b, fam, qf, b);
assertTrue(CellComparator.COMPARATOR.compare(aaa, bbb) < 0);
assertTrue(CellComparator.COMPARATOR.compare(bbb, aaa) > 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(aaa, bbb) < 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(bbb, aaa) > 0);
// Compare breaks if passed same ByteBuffer as both left and right arguments.
assertTrue(CellComparator.COMPARATOR.compare(bbb, bbb) == 0);
assertTrue(CellComparator.COMPARATOR.compare(aaa, aaa) == 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(bbb, bbb) == 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(aaa, aaa) == 0);
// Do compare with different timestamps.
aaa = new KeyValue(a, fam, qf, 1, a);
bbb = new KeyValue(a, fam, qf, 2, a);
assertTrue(CellComparator.COMPARATOR.compare(aaa, bbb) > 0);
assertTrue(CellComparator.COMPARATOR.compare(bbb, aaa) < 0);
assertTrue(CellComparator.COMPARATOR.compare(aaa, aaa) == 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(aaa, bbb) > 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(bbb, aaa) < 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(aaa, aaa) == 0);
// Do compare with different types. Higher numbered types -- Delete
// should sort ahead of lower numbers; i.e. Put
aaa = new KeyValue(a, fam, qf, 1, KeyValue.Type.Delete, a);
bbb = new KeyValue(a, fam, qf, 1, a);
assertTrue(CellComparator.COMPARATOR.compare(aaa, bbb) < 0);
assertTrue(CellComparator.COMPARATOR.compare(bbb, aaa) > 0);
assertTrue(CellComparator.COMPARATOR.compare(aaa, aaa) == 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(aaa, bbb) < 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(bbb, aaa) > 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(aaa, aaa) == 0);
}
public void testMoreComparisons() throws Exception {
@ -136,7 +136,7 @@ public class TestKeyValue extends TestCase {
Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now);
KeyValue bbb = new KeyValue(
Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now);
CellComparator c = CellComparator.META_COMPARATOR;
CellComparator c = CellComparatorImpl.META_COMPARATOR;
assertTrue(c.compare(bbb, aaa) < 0);
KeyValue aaaa = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"),
@ -151,13 +151,13 @@ public class TestKeyValue extends TestCase {
Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236034574912L,
(byte[])null);
assertTrue(c.compare(x, y) < 0);
comparisons(CellComparator.META_COMPARATOR);
comparisons(CellComparator.COMPARATOR);
metacomparisons(CellComparator.META_COMPARATOR);
comparisons(CellComparatorImpl.META_COMPARATOR);
comparisons(CellComparatorImpl.COMPARATOR);
metacomparisons(CellComparatorImpl.META_COMPARATOR);
}
public void testMetaComparatorTableKeysWithCommaOk() {
CellComparator c = CellComparator.META_COMPARATOR;
CellComparator c = CellComparatorImpl.META_COMPARATOR;
long now = System.currentTimeMillis();
// meta keys values are not quite right. A users can enter illegal values
// from shell when scanning meta.
@ -178,17 +178,17 @@ public class TestKeyValue extends TestCase {
Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
KeyValue rowB = new KeyValue(Bytes.toBytes("testtable,www.hbase.org/%20,99999"),
Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
assertTrue(CellComparator.META_COMPARATOR.compare(rowA, rowB) < 0);
assertTrue(CellComparatorImpl.META_COMPARATOR.compare(rowA, rowB) < 0);
rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes("fam"),
Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"),
Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
assertTrue(CellComparator.META_COMPARATOR.compare(rowA, rowB) < 0);
assertTrue(CellComparatorImpl.META_COMPARATOR.compare(rowA, rowB) < 0);
}
private void metacomparisons(final CellComparator c) {
private void metacomparisons(final CellComparatorImpl c) {
long now = System.currentTimeMillis();
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now),
@ -205,7 +205,7 @@ public class TestKeyValue extends TestCase {
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)) > 0);
}
private void comparisons(final CellComparator c) {
private void comparisons(final CellComparatorImpl c) {
long now = System.currentTimeMillis();
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now),
@ -222,7 +222,7 @@ public class TestKeyValue extends TestCase {
}
public void testBinaryKeys() throws Exception {
Set<KeyValue> set = new TreeSet<>(CellComparator.COMPARATOR);
Set<KeyValue> set = new TreeSet<>(CellComparatorImpl.COMPARATOR);
final byte [] fam = Bytes.toBytes("col");
final byte [] qf = Bytes.toBytes("umn");
final byte [] nb = new byte[0];
@ -248,7 +248,7 @@ public class TestKeyValue extends TestCase {
}
assertTrue(assertion);
// Make set with good comparator
set = new TreeSet<>(CellComparator.META_COMPARATOR);
set = new TreeSet<>(CellComparatorImpl.META_COMPARATOR);
Collections.addAll(set, keys);
count = 0;
for (KeyValue k: set) {
@ -288,7 +288,7 @@ public class TestKeyValue extends TestCase {
}
public void testCompareWithoutRow() {
final CellComparator c = CellComparator.COMPARATOR;
final CellComparator c = CellComparatorImpl.COMPARATOR;
byte[] row = Bytes.toBytes("row");
byte[] fa = Bytes.toBytes("fa");
@ -335,7 +335,7 @@ public class TestKeyValue extends TestCase {
}
public void testFirstLastOnRow() {
final CellComparator c = CellComparator.COMPARATOR;
final CellComparator c = CellComparatorImpl.COMPARATOR;
long ts = 1;
byte[] bufferA = new byte[128];
int offsetA = 0;
@ -499,7 +499,7 @@ public class TestKeyValue extends TestCase {
}
public void testMetaKeyComparator() {
CellComparator c = CellComparator.META_COMPARATOR;
CellComparator c = CellComparatorImpl.META_COMPARATOR;
long now = System.currentTimeMillis();
KeyValue a = new KeyValue(Bytes.toBytes("table1"), now);

View File

@ -26,7 +26,7 @@ import java.util.Random;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.Tag;
@ -287,7 +287,7 @@ public class RedundantKVGenerator {
}
}
Collections.sort(result, CellComparator.COMPARATOR);
Collections.sort(result, CellComparatorImpl.COMPARATOR);
return result;
}
@ -383,7 +383,7 @@ public class RedundantKVGenerator {
}
}
Collections.sort(result, CellComparator.COMPARATOR);
Collections.sort(result, CellComparatorImpl.COMPARATOR);
return result;
}

View File

@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
@ -84,7 +84,7 @@ public class IntegrationTestImportTsv extends Configured implements Tool {
public TestName name = new TestName();
protected static final Set<KeyValue> simple_expected =
new TreeSet<KeyValue>(CellComparator.COMPARATOR) {
new TreeSet<KeyValue>(CellComparatorImpl.COMPARATOR) {
private static final long serialVersionUID = 1L;
{
byte[] family = Bytes.toBytes("d");
@ -163,7 +163,7 @@ public class IntegrationTestImportTsv extends Configured implements Tool {
assertTrue(
format("Scan produced surprising result. expected: <%s>, actual: %s",
expected, actual),
CellComparator.COMPARATOR.compare(expected, actual) == 0);
CellComparatorImpl.COMPARATOR.compare(expected, actual) == 0);
}
}
assertFalse("Did not consume all expected values.", expectedIt.hasNext());

View File

@ -22,7 +22,7 @@ import java.io.IOException;
import java.util.TreeSet;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.MapReduceCell;
@ -42,7 +42,7 @@ public class CellSortReducer
protected void reduce(ImmutableBytesWritable row, Iterable<Cell> kvs,
Reducer<ImmutableBytesWritable, Cell, ImmutableBytesWritable, Cell>.Context context)
throws java.io.IOException, InterruptedException {
TreeSet<Cell> map = new TreeSet<>(CellComparator.COMPARATOR);
TreeSet<Cell> map = new TreeSet<>(CellComparatorImpl.COMPARATOR);
for (Cell kv : kvs) {
try {
map.add(CellUtil.deepClone(kv));

View File

@ -46,7 +46,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
@ -402,12 +402,12 @@ public class HFileOutputFormat2
wl.writer =
new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs)
.withOutputDir(familydir).withBloomType(bloomType)
.withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext).build();
.withComparator(CellComparatorImpl.COMPARATOR).withFileContext(hFileContext).build();
} else {
wl.writer =
new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))
.withOutputDir(familydir).withBloomType(bloomType)
.withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext)
.withComparator(CellComparatorImpl.COMPARATOR).withFileContext(hFileContext)
.withFavoredNodes(favoredNodes).build();
}

View File

@ -40,7 +40,7 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
@ -220,7 +220,7 @@ public class Import extends Configured implements Tool {
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
justification="This is wrong, yes, but we should be purging Writables, not fixing them")
public int compareTo(CellWritableComparable o) {
return CellComparator.COMPARATOR.compare(this.kv, ((CellWritableComparable)o).kv);
return CellComparatorImpl.COMPARATOR.compare(this.kv, ((CellWritableComparable)o).kv);
}
public static class CellWritableComparator extends WritableComparator {

View File

@ -27,7 +27,7 @@ import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.Tag;
@ -77,7 +77,7 @@ public class PutSortReducer extends
"putsortreducer.row.threshold", 1L * (1<<30));
Iterator<Put> iter = puts.iterator();
while (iter.hasNext()) {
TreeSet<KeyValue> map = new TreeSet<>(CellComparator.COMPARATOR);
TreeSet<KeyValue> map = new TreeSet<>(CellComparatorImpl.COMPARATOR);
long curSize = 0;
// stop at the end or the RAM threshold
List<Tag> tags = new ArrayList<>();

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
@ -52,7 +52,6 @@ import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterators;
public class SyncTable extends Configured implements Tool {
@ -588,18 +587,18 @@ public class SyncTable extends Configured implements Tool {
return -1; // target missing cell
}
int result = CellComparator.compareFamilies(c1, c2);
int result = CellComparatorImpl.COMPARATOR.compareFamilies(c1, c2);
if (result != 0) {
return result;
}
result = CellComparator.compareQualifiers(c1, c2);
result = CellComparatorImpl.COMPARATOR.compareQualifiers(c1, c2);
if (result != 0) {
return result;
}
// note timestamp comparison is inverted - more recent cells first
return CellComparator.compareTimestamps(c1, c2);
return CellComparatorImpl.COMPARATOR.compareTimestamps(c1, c2);
}
@Override

View File

@ -27,7 +27,7 @@ import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.Tag;
@ -144,7 +144,7 @@ public class TextSortReducer extends
"reducer.row.threshold", 1L * (1<<30));
Iterator<Text> iter = lines.iterator();
while (iter.hasNext()) {
Set<KeyValue> kvs = new TreeSet<>(CellComparator.COMPARATOR);
Set<KeyValue> kvs = new TreeSet<>(CellComparatorImpl.COMPARATOR);
long curSize = 0;
// stop at the end or the RAM threshold
while (iter.hasNext() && curSize < threshold) {

View File

@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.CellComparator.MetaCellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl.MetaCellComparator;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.codec.prefixtree.decode.DecoderFactory;
import org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArraySearcher;

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.codec.prefixtree.decode;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta;
import org.apache.hadoop.hbase.codec.prefixtree.decode.column.ColumnReader;
import org.apache.hadoop.hbase.codec.prefixtree.decode.row.RowNodeReader;
@ -419,7 +420,7 @@ public class PrefixTreeArrayScanner extends PrefixTreeCell implements CellScanne
protected int populateNonRowFieldsAndCompareTo(int cellNum, Cell key) {
populateNonRowFields(cellNum);
return comparator.compareKeyIgnoresMvcc(this, key);
return CellUtil.compareKeyIgnoresMvcc(comparator, this, key);
}
protected void populateFirstNonRowFields() {

View File

@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@ -42,7 +43,7 @@ import org.apache.hadoop.hbase.util.ObjectIntPair;
public class PrefixTreeCell extends ByteBufferCell implements SettableSequenceId,
Comparable<Cell> {
// Create a reference here? Can be removed too
protected CellComparator comparator = CellComparator.COMPARATOR;
protected CellComparator comparator = CellComparatorImpl.COMPARATOR;
/********************** static **********************/

View File

@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.codec.prefixtree.row.data;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.codec.prefixtree.row.BaseTestRowData;
@ -50,7 +50,7 @@ public class TestRowDataNumberStrings extends BaseTestRowData{
d.add(new KeyValue(row, family, column, 0L, Type.Put, value));
}
Collections.sort(d, CellComparator.COMPARATOR);
Collections.sort(d, CellComparatorImpl.COMPARATOR);
}
@Override

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
@ -209,11 +210,11 @@ public class HalfStoreFileReader extends StoreFileReader {
@Override
public int seekTo(Cell key) throws IOException {
if (top) {
if (getComparator().compareKeyIgnoresMvcc(key, splitCell) < 0) {
if (CellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) < 0) {
return -1;
}
} else {
if (getComparator().compareKeyIgnoresMvcc(key, splitCell) >= 0) {
if (CellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) >= 0) {
// we would place the scanner in the second half.
// it might be an error to return false here ever...
boolean res = delegate.seekBefore(splitCell);
@ -234,11 +235,11 @@ public class HalfStoreFileReader extends StoreFileReader {
// except
// that we call reseekTo (and not seekTo) on the delegate.
if (top) {
if (getComparator().compareKeyIgnoresMvcc(key, splitCell) < 0) {
if (CellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) < 0) {
return -1;
}
} else {
if (getComparator().compareKeyIgnoresMvcc(key, splitCell) >= 0) {
if (CellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) >= 0) {
// we would place the scanner in the second half.
// it might be an error to return false here ever...
boolean res = delegate.seekBefore(splitCell);
@ -260,13 +261,13 @@ public class HalfStoreFileReader extends StoreFileReader {
public boolean seekBefore(Cell key) throws IOException {
if (top) {
Optional<Cell> fk = getFirstKey();
if (getComparator().compareKeyIgnoresMvcc(key, fk.get()) <= 0) {
if (CellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0) {
return false;
}
} else {
// The equals sign isn't strictly necessary just here to be consistent
// with seekTo
if (getComparator().compareKeyIgnoresMvcc(key, splitCell) >= 0) {
if (CellUtil.compareKeyIgnoresMvcc(getComparator(), key, splitCell) >= 0) {
boolean ret = this.delegate.seekBefore(splitCell);
if (ret) {
atEnd = false;

View File

@ -28,7 +28,8 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparator.MetaCellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellComparatorImpl.MetaCellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.io.compress.Compression;
@ -109,7 +110,7 @@ public class FixedFileTrailer {
/** Raw key comparator class name in version 3 */
// We could write the actual class name from 2.0 onwards and handle BC
private String comparatorClassName = CellComparator.COMPARATOR.getClass().getName();
private String comparatorClassName = CellComparatorImpl.COMPARATOR.getClass().getName();
/** The encryption key */
private byte[] encryptionKey;
@ -559,11 +560,15 @@ public class FixedFileTrailer {
private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
throws IOException {
Class<? extends CellComparator> comparatorKlass;
// for BC
if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName())
|| comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName())) {
comparatorKlass = CellComparator.class;
|| comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName())
|| (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))) {
comparatorKlass = CellComparatorImpl.class;
} else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName())
|| comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName())) {
|| comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName())
|| (comparatorClassName
.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator"))) {
comparatorKlass = MetaCellComparator.class;
} else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue.RawBytesComparator")
|| comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")) {

View File

@ -49,6 +49,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.fs.HFileSystem;
@ -276,7 +277,7 @@ public class HFile {
protected Path path;
protected FSDataOutputStream ostream;
protected CellComparator comparator =
CellComparator.COMPARATOR;
CellComparatorImpl.COMPARATOR;
protected InetSocketAddress[] favoredNodes;
private HFileContext fileContext;
protected boolean shouldDropBehind = false;

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
//import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue;
@ -766,7 +767,7 @@ public class HFileBlockIndex {
// TODO avoid array call.
nonRootIndex.asSubByteBuffer(midKeyOffset, midLength, pair);
nonRootIndexkeyOnlyKV.setKey(pair.getFirst(), pair.getSecond(), midLength);
int cmp = comparator.compareKeyIgnoresMvcc(key, nonRootIndexkeyOnlyKV);
int cmp = CellUtil.compareKeyIgnoresMvcc(comparator, key, nonRootIndexkeyOnlyKV);
// key lives above the midpoint
if (cmp > 0)

View File

@ -53,7 +53,7 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@ -379,7 +379,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
do {
Cell cell = scanner.getCell();
if (row != null && row.length != 0) {
int result = CellComparator.COMPARATOR.compareRows(cell, row, 0, row.length);
int result = CellComparatorImpl.COMPARATOR.compareRows(cell, row, 0, row.length);
if (result > 0) {
break;
} else if (result < 0) {
@ -408,7 +408,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
}
// check if rows are in order
if (checkRow && pCell != null) {
if (CellComparator.COMPARATOR.compareRows(pCell, cell) > 0) {
if (CellComparatorImpl.COMPARATOR.compareRows(pCell, cell) > 0) {
err.println("WARNING, previous row is greater then"
+ " current row\n\tfilename -> " + file + "\n\tprevious -> "
+ CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> "
@ -424,7 +424,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
+ "\n\tfilename -> " + file + "\n\tkeyvalue -> "
+ CellUtil.getCellKeyAsString(cell));
}
if (pCell != null && CellComparator.compareFamilies(pCell, cell) != 0) {
if (pCell != null && CellComparatorImpl.COMPARATOR.compareFamilies(pCell, cell) != 0) {
err.println("WARNING, previous kv has different family"
+ " compared to current key\n\tfilename -> " + file
+ "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell)
@ -618,7 +618,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
public void collect(Cell cell) {
valLen.update(cell.getValueLength());
if (prevCell != null &&
CellComparator.COMPARATOR.compareRows(prevCell, cell) != 0) {
CellComparatorImpl.COMPARATOR.compareRows(prevCell, cell) != 0) {
// new row
collectRow();
}

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@ -104,7 +105,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
private int avgValueLen = -1;
/** Key comparator */
private CellComparator comparator = CellComparator.COMPARATOR;
private CellComparator comparator = CellComparatorImpl.COMPARATOR;
/** Size of this file. */
private final long fileSize;
@ -727,7 +728,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
offsetFromPos += Bytes.SIZEOF_LONG;
blockBuffer.asSubByteBuffer(blockBuffer.position() + offsetFromPos, klen, pair);
bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen);
int comp = reader.getComparator().compareKeyIgnoresMvcc(key, bufBackedKeyOnlyKv);
int comp = CellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv);
offsetFromPos += klen + vlen;
if (this.reader.getFileContext().isIncludesTags()) {
// Read short as unsigned, high byte first
@ -810,8 +811,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
} else {
// The comparison with no_next_index_key has to be checked
if (this.nextIndexedKey != null &&
(this.nextIndexedKey == KeyValueScanner.NO_NEXT_INDEXED_KEY || reader
.getComparator().compareKeyIgnoresMvcc(key, nextIndexedKey) < 0)) {
(this.nextIndexedKey == KeyValueScanner.NO_NEXT_INDEXED_KEY || CellUtil
.compareKeyIgnoresMvcc(reader.getComparator(), key, nextIndexedKey) < 0)) {
// The reader shall continue to scan the current data block instead
// of querying the
// block index as long as it knows the target key is strictly
@ -864,8 +865,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return false;
}
Cell firstKey = getFirstKeyCellInBlock(seekToBlock);
if (reader.getComparator()
.compareKeyIgnoresMvcc(firstKey, key) >= 0) {
if (CellUtil.compareKeyIgnoresMvcc(reader.getComparator(), firstKey, key) >= 0) {
long previousBlockOffset = seekToBlock.getPrevBlockOffset();
// The key we are interested in
if (previousBlockOffset == -1) {
@ -1229,7 +1229,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
public int compareKey(CellComparator comparator, Cell key) {
blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, pair);
this.bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), currKeyLen);
return comparator.compareKeyIgnoresMvcc(key, this.bufBackedKeyOnlyKv);
return CellUtil.compareKeyIgnoresMvcc(comparator, key, this.bufBackedKeyOnlyKv);
}
@Override

View File

@ -36,10 +36,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ByteBufferCell;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.CellComparator.MetaCellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl.MetaCellComparator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Encryption;
@ -176,7 +177,7 @@ public class HFileWriterImpl implements HFile.Writer {
} else {
this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
}
this.comparator = comparator != null? comparator: CellComparator.COMPARATOR;
this.comparator = comparator != null? comparator: CellComparatorImpl.COMPARATOR;
closeOutputStream = path != null;
this.cacheConf = cacheConf;
@ -238,7 +239,7 @@ public class HFileWriterImpl implements HFile.Writer {
throw new IOException("Key cannot be null or empty");
}
if (lastCell != null) {
int keyComp = comparator.compareKeyIgnoresMvcc(lastCell, cell);
int keyComp = CellUtil.compareKeyIgnoresMvcc(comparator, lastCell, cell);
if (keyComp > 0) {
throw new IOException("Added a key not lexically larger than"
@ -399,7 +400,7 @@ public class HFileWriterImpl implements HFile.Writer {
return CellUtil.createFirstOnRow(midRow);
}
// Rows are same. Compare on families.
diff = CellComparator.compareFamilies(left, right);
diff = comparator.compareFamilies(left, right);
if (diff > 0) {
throw new IllegalArgumentException("Left family sorts after right family; left="
+ CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right));
@ -421,7 +422,7 @@ public class HFileWriterImpl implements HFile.Writer {
return CellUtil.createFirstOnRowFamily(right, midRow, 0, midRow.length);
}
// Families are same. Compare on qualifiers.
diff = CellComparator.compareQualifiers(left, right);
diff = comparator.compareQualifiers(left, right);
if (diff > 0) {
throw new IllegalArgumentException("Left qualifier sorts after right qualifier; left="
+ CellUtil.getCellKeyAsString(left) + ", right=" + CellUtil.getCellKeyAsString(right));

View File

@ -42,7 +42,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
@ -681,7 +681,7 @@ public final class MobUtils {
StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, fs)
.withFilePath(path)
.withComparator(CellComparator.COMPARATOR).withBloomType(bloomType)
.withComparator(CellComparatorImpl.COMPARATOR).withBloomType(bloomType)
.withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build();
return w;
}

View File

@ -48,7 +48,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
@ -812,7 +812,7 @@ public class PartitionedMobCompactor extends MobCompactor {
List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact,
false, true, false, false, HConstants.LATEST_TIMESTAMP);
long ttl = HStore.determineTTLFromFamily(column);
ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparator.COMPARATOR);
ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparatorImpl.COMPARATOR);
return new StoreScanner(scanInfo, scanType, scanners);
}

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.util.ClassSize;

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.yetus.audience.InterfaceAudience;
@ -62,7 +63,7 @@ public class CellChunkImmutableSegment extends ImmutableSegment {
*/
protected CellChunkImmutableSegment(CSLMImmutableSegment segment, MemStoreSize memstoreSize) {
super(segment); // initiailize the upper class
incSize(0,-CSLMImmutableSegment.DEEP_OVERHEAD_CSLM+ CellChunkImmutableSegment.DEEP_OVERHEAD_CCM);
incSize(0,-CSLMImmutableSegment.DEEP_OVERHEAD_CSLM + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM);
int numOfCells = segment.getCellsCount();
// build the new CellSet based on CellChunkMap
reinitializeCellSet(numOfCells, segment.getScanner(Long.MAX_VALUE), segment.getCellSet());

View File

@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@ -63,7 +64,7 @@ public class DefaultMemStore extends AbstractMemStore {
* Default constructor. Used for tests.
*/
public DefaultMemStore() {
this(HBaseConfiguration.create(), CellComparator.COMPARATOR);
this(HBaseConfiguration.create(), CellComparatorImpl.COMPARATOR);
}
/**

View File

@ -33,7 +33,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@ -81,6 +80,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
@ -3796,7 +3796,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
} else if (result.size() == 1 && !valueIsNull) {
Cell kv = result.get(0);
cellTs = kv.getTimestamp();
int compareResult = CellComparator.compareValue(kv, comparator);
int compareResult = CellUtil.compareValue(kv, comparator);
matches = matches(op, compareResult);
}
// If matches put the new put or delete the new delete
@ -7666,7 +7666,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
/**
* @return Sorted list of <code>cells</code> using <code>comparator</code>
*/
private static List<Cell> sort(List<Cell> cells, final Comparator<Cell> comparator) {
private static List<Cell> sort(List<Cell> cells, final CellComparator comparator) {
Collections.sort(cells, comparator);
return cells;
}
@ -8228,8 +8228,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
@Override
public CellComparator getCellComparator() {
return this.getRegionInfo().isMetaRegion() ? CellComparator.META_COMPARATOR
: CellComparator.COMPARATOR;
return this.getRegionInfo().isMetaRegion() ? CellComparatorImpl.META_COMPARATOR
: CellComparatorImpl.COMPARATOR;
}
public long getMemStoreFlushSize() {

View File

@ -56,6 +56,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.HConstants;
@ -281,9 +282,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
break;
case NONE :
default:
className = DefaultMemStore.class.getName();
this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator });
className = DefaultMemStore.class.getName();
this.memstore = ReflectionUtils.newInstance(DefaultMemStore.class,
new Object[] { conf, this.comparator });
}
LOG.info("Memstore class name is " + className);
this.offPeakHours = OffPeakHours.getInstance(conf);
@ -777,7 +778,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
+ CellUtil.getCellKeyAsString(prevCell) + " current="
+ CellUtil.getCellKeyAsString(cell));
}
if (CellComparator.compareFamilies(prevCell, cell) != 0) {
if (CellComparatorImpl.COMPARATOR.compareFamilies(prevCell, cell) != 0) {
throw new InvalidHFileException("Previous key had different"
+ " family compared to current key: path=" + srcPath
+ " previous="

View File

@ -19,9 +19,9 @@
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.io.TimeRange;
import java.util.ArrayList;

View File

@ -19,12 +19,11 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.Collection;
import java.util.Comparator;
import java.util.OptionalDouble;
import java.util.OptionalLong;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@ -49,7 +48,7 @@ public interface Store {
int NO_PRIORITY = Integer.MIN_VALUE;
// General Accessors
Comparator<Cell> getComparator();
CellComparator getComparator();
Collection<? extends StoreFile> getStorefiles();

View File

@ -18,12 +18,12 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.Comparator;
import java.util.Optional;
import java.util.OptionalLong;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.yetus.audience.InterfaceAudience;
@ -54,7 +54,7 @@ public interface StoreFile {
/**
* Get the comparator for comparing two cells.
*/
Comparator<Cell> getComparator();
CellComparator getComparator();
/**
* Get max of the MemstoreTS in the KV's in this store file.

View File

@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
@ -371,7 +372,7 @@ public class StoreFileReader {
if (bloomFilterType == BloomType.ROW) {
keyIsAfterLast = (Bytes.BYTES_RAWCOMPARATOR.compare(key, lastBloomKey) > 0);
} else {
keyIsAfterLast = (CellComparator.COMPARATOR.compare(kvKey, lastBloomKeyOnlyKV)) > 0;
keyIsAfterLast = (CellComparatorImpl.COMPARATOR.compare(kvKey, lastBloomKeyOnlyKV)) > 0;
}
}
@ -384,7 +385,7 @@ public class StoreFileReader {
// hbase:meta does not have blooms. So we need not have special interpretation
// of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom
if (keyIsAfterLast
&& (CellComparator.COMPARATOR.compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) {
&& (CellComparatorImpl.COMPARATOR.compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) {
exists = false;
} else {
exists =

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@ -345,7 +346,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
private final CacheConfig cacheConf;
private final FileSystem fs;
private CellComparator comparator = CellComparator.COMPARATOR;
private CellComparator comparator = CellComparatorImpl.COMPARATOR;
private BloomType bloomType = BloomType.NONE;
private long maxKeyCount = 0;
private Path dir;
@ -470,7 +471,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
}
if (comparator == null) {
comparator = CellComparator.COMPARATOR;
comparator = CellComparatorImpl.COMPARATOR;
}
return new StoreFileWriter(fs, filePath,
conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext,

View File

@ -549,7 +549,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
}
// Only do a sanity-check if store and comparator are available.
CellComparator comparator = store.map(s -> s.getComparator()).orElse(null);
CellComparator comparator =
store.map(s -> s.getComparator()).orElse(null);
int count = 0;
long totalBytesRead = 0;

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.regionserver.ShipperListener;
/**
@ -84,4 +85,10 @@ public interface DeleteTracker extends ShipperListener {
// deleted in strong semantics of versions(See MvccTracker)
}
/**
* Return the comparator passed to this delete tracker
* @return the cell comparator
*/
CellComparator getCellComparator();
}

View File

@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.NavigableSet;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
@ -117,7 +116,7 @@ public class ExplicitColumnTracker implements ColumnTracker {
}
// Compare specific column to current column
int ret = CellComparator.compareQualifiers(cell, column.getBuffer(), column.getOffset(),
int ret = CellUtil.compareQualifiers(cell, column.getBuffer(), column.getOffset(),
column.getLength());
// Column Matches. Return include code. The caller would call checkVersions
@ -215,7 +214,7 @@ public class ExplicitColumnTracker implements ColumnTracker {
*/
public void doneWithColumn(Cell cell) {
while (this.column != null) {
int compare = CellComparator.compareQualifiers(cell, column.getBuffer(), column.getOffset(),
int compare = CellUtil.compareQualifiers(cell, column.getBuffer(), column.getOffset(),
column.getLength());
resetTS();
if (compare >= 0) {

View File

@ -28,6 +28,7 @@ import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.yetus.audience.InterfaceAudience;
@ -55,6 +56,7 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker {
private byte[][] columns;
private int minVersions;
private long oldestStamp;
private CellComparator comparator;
// These two maps have same structure.
// Each node is a versions deletion (DeleteFamily or DeleteColumn). Key is the mvcc of the marker,
@ -71,14 +73,15 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker {
* Note maxVersion and minVersion must set according to cf's conf, not user's scan parameter.
*
* @param columns columns specified user in query
* @param comparartor the cell comparator
* @param minVersion The minimum number of versions to keep(used when TTL is set).
* @param maxVersion The maximum number of versions in CF's conf
* @param resultMaxVersions maximum versions to return per column, which may be different from
* maxVersion
* @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
*/
public NewVersionBehaviorTracker(NavigableSet<byte[]> columns, int minVersion, int maxVersion,
int resultMaxVersions, long oldestUnexpiredTS) {
public NewVersionBehaviorTracker(NavigableSet<byte[]> columns, CellComparator comparartor,
int minVersion, int maxVersion, int resultMaxVersions, long oldestUnexpiredTS) {
this.maxVersions = maxVersion;
this.minVersions = minVersion;
this.resultMaxVersions = resultMaxVersions;
@ -90,6 +93,7 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker {
this.columns[i++] = column;
}
}
this.comparator = comparartor;
reset();
}
@ -373,4 +377,9 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker {
return false;
}
@Override
public CellComparator getCellComparator() {
return this.comparator;
}
}

View File

@ -56,6 +56,11 @@ public class ScanDeleteTracker implements DeleteTracker {
protected int deleteLength = 0;
protected byte deleteType = 0;
protected long deleteTimestamp = 0L;
protected final CellComparator comparator;
public ScanDeleteTracker(CellComparator comparator) {
this.comparator = comparator;
}
/**
* Add the specified KeyValue to the list of deletes to check against for this row operation.
@ -108,7 +113,7 @@ public class ScanDeleteTracker implements DeleteTracker {
}
if (deleteCell != null) {
int ret = -(CellComparator.compareQualifiers(cell, deleteCell));
int ret = -(this.comparator.compareQualifiers(cell, deleteCell));
if (ret == 0) {
if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
return DeleteResult.COLUMN_DELETED;
@ -166,4 +171,9 @@ public class ScanDeleteTracker implements DeleteTracker {
deleteCell = KeyValueUtil.toNewKeyCell(deleteCell);
}
}
@Override
public CellComparator getCellComparator() {
return this.comparator;
}
}

View File

@ -318,8 +318,8 @@ public abstract class ScanQueryMatcher implements ShipperListener {
* @return result of the compare between the indexed key and the key portion of the passed cell
*/
public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) {
return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, 0, 0, null, 0, 0,
HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
return CellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, 0,
0, HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
}
/**
@ -330,10 +330,10 @@ public abstract class ScanQueryMatcher implements ShipperListener {
public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) {
ColumnCount nextColumn = columns.getColumnHint();
if (nextColumn == null) {
return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, 0, 0, null, 0, 0,
HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
return CellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null,
0, 0, HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
} else {
return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell,
return CellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell,
currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(),
nextColumn.getOffset(), nextColumn.getLength(), HConstants.LATEST_TIMESTAMP,
Type.Maximum.getCode());
@ -380,16 +380,18 @@ public abstract class ScanQueryMatcher implements ShipperListener {
DeleteTracker deleteTracker;
if (scanInfo.isNewVersionBehavior() && (userScan == null || !userScan.isRaw())) {
deleteTracker = new NewVersionBehaviorTracker(columns, scanInfo.getMinVersions(),
scanInfo.getMaxVersions(), resultMaxVersion, oldestUnexpiredTS);
deleteTracker = new NewVersionBehaviorTracker(columns, scanInfo.getComparator(),
scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion,
oldestUnexpiredTS);
} else {
deleteTracker = new ScanDeleteTracker();
deleteTracker = new ScanDeleteTracker(scanInfo.getComparator());
}
if (host != null) {
deleteTracker = host.postInstantiateDeleteTracker(deleteTracker);
if (deleteTracker instanceof VisibilityScanDeleteTracker && scanInfo.isNewVersionBehavior()) {
deleteTracker = new VisibilityNewVersionBehaivorTracker(columns, scanInfo.getMinVersions(),
scanInfo.getMaxVersions(), resultMaxVersion, oldestUnexpiredTS);
deleteTracker = new VisibilityNewVersionBehaivorTracker(columns, scanInfo.getComparator(),
scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion,
oldestUnexpiredTS);
}
}

View File

@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.regionserver.querymatcher;
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValueUtil;
@ -84,7 +84,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker {
// do not count a delete marker as another version
return checkVersion(type, timestamp);
}
int cmp = CellComparator.compareQualifiers(cell, this.columnCell);
int cmp = CellComparatorImpl.COMPARATOR.compareQualifiers(cell, this.columnCell);
if (cmp == 0) {
if (ignoreCount) {
return ScanQueryMatcher.MatchCode.INCLUDE;

View File

@ -26,7 +26,7 @@ import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
@ -81,7 +81,7 @@ class FSWALEntry extends Entry {
} else {
return cells.stream()
.filter(v -> !CellUtil.matchingFamily(v, WALEdit.METAFAMILY))
.collect(toCollection(() -> new TreeSet<>(CellComparator::compareFamilies)))
.collect(toCollection(() -> new TreeSet<>(CellComparatorImpl.COMPARATOR::compareFamilies)))
.stream()
.map(CellUtil::cloneFamily)
.collect(toCollection(() -> new TreeSet<>(Bytes.BYTES_COMPARATOR)));

View File

@ -623,7 +623,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso
// visibility tags per cell. The covering cells are determined not only
// based on the delete type and ts
// but also on the visibility expression matching.
return new VisibilityScanDeleteTracker();
return new VisibilityScanDeleteTracker(delTracker.getCellComparator());
}
@Override

View File

@ -33,6 +33,7 @@ import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.regionserver.querymatcher.NewVersionBehaviorTracker;
@ -45,10 +46,10 @@ public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTrack
private static final Log LOG = LogFactory.getLog(VisibilityNewVersionBehaivorTracker.class);
public VisibilityNewVersionBehaivorTracker(NavigableSet<byte[]> columns, int minVersion,
int maxVersion,
int resultMaxVersions, long oldestUnexpiredTS) {
super(columns, minVersion, maxVersion, resultMaxVersions, oldestUnexpiredTS);
public VisibilityNewVersionBehaivorTracker(NavigableSet<byte[]> columns,
CellComparator cellComparator, int minVersion, int maxVersion, int resultMaxVersions,
long oldestUnexpiredTS) {
super(columns, cellComparator, minVersion, maxVersion, resultMaxVersions, oldestUnexpiredTS);
}
private static class TagInfo {

View File

@ -29,6 +29,7 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.regionserver.querymatcher.ScanDeleteTracker;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
@ -67,8 +68,8 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker {
// TODO : Need to handle puts with same ts but different vis tags.
private List<Pair<List<Tag>, Byte>> visiblityTagsDeleteColumnVersion = new ArrayList<>();
public VisibilityScanDeleteTracker() {
super();
public VisibilityScanDeleteTracker(CellComparator comparator) {
super(comparator);
}
@Override
@ -246,7 +247,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker {
}
}
if (deleteCell != null) {
int ret = CellComparator.compareQualifiers(cell, deleteCell);
int ret = CellComparatorImpl.COMPARATOR.compareQualifiers(cell, deleteCell);
if (ret == 0) {
if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
if (visibilityTagsDeleteColumns != null) {

View File

@ -23,7 +23,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter;
import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterBase;
@ -194,7 +194,7 @@ public final class BloomFilterFactory {
// In case of compound Bloom filters we ignore the maxKeys hint.
CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
bloomType == BloomType.ROWCOL ? CellComparator.COMPARATOR : null, bloomType);
bloomType == BloomType.ROWCOL ? CellComparatorImpl.COMPARATOR : null, bloomType);
writer.addInlineBlockWriter(bloomWriter);
return bloomWriter;
}

View File

@ -27,6 +27,7 @@ import java.util.SortedSet;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner;
/**
@ -40,7 +41,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner {
private Cell current;
public CollectionBackedScanner(SortedSet<Cell> set) {
this(set, CellComparator.COMPARATOR);
this(set, CellComparatorImpl.COMPARATOR);
}
public CollectionBackedScanner(SortedSet<Cell> set,
@ -51,7 +52,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner {
}
public CollectionBackedScanner(List<Cell> list) {
this(list, CellComparator.COMPARATOR);
this(list, CellComparatorImpl.COMPARATOR);
}
public CollectionBackedScanner(List<Cell> list,

View File

@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
@ -140,7 +140,7 @@ public class CompressionTest {
scanner.seekTo(); // position to the start of file
// Scanner does not do Cells yet. Do below for now till fixed.
cc = scanner.getCell();
if (CellComparator.COMPARATOR.compareRows(c, cc) != 0) {
if (CellComparatorImpl.COMPARATOR.compareRows(c, cc) != 0) {
throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
}
} finally {

View File

@ -23,6 +23,7 @@ import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.yetus.audience.InterfaceAudience;

View File

@ -3538,7 +3538,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
int i;
for (i = 0; i < minLen
&& CellComparator.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
&& CellComparatorImpl.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
++i) {}
if (additionalMsg == null) {

View File

@ -366,7 +366,7 @@ public class HFilePerformanceEvaluation {
writer = HFile.getWriterFactoryNoCache(conf)
.withPath(fs, mf)
.withFileContext(hFileContext)
.withComparator(CellComparator.COMPARATOR)
.withComparator(CellComparatorImpl.COMPARATOR)
.create();
}

View File

@ -100,7 +100,7 @@ public class MetaMockingUtil {
}
//important: sort the kvs so that binary search work
Collections.sort(kvs, CellComparator.META_COMPARATOR);
Collections.sort(kvs, CellComparatorImpl.META_COMPARATOR);
return Result.create(kvs);
}

View File

@ -32,7 +32,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
@ -292,7 +292,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
public boolean next(List<Cell> outResult, ScannerContext scannerContext) throws IOException {
boolean next = super.next(outResult, scannerContext);
for (Cell cell : outResult) {
if(CellComparator.COMPARATOR.compareRows(cell, ROW2, 0, ROW2.length) == 0) {
if(CellComparatorImpl.COMPARATOR.compareRows(cell, ROW2, 0, ROW2.length) == 0) {
try {
// hold the compaction
// set doscan to true

View File

@ -32,7 +32,7 @@ import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
@ -71,7 +71,7 @@ public class TestResult extends TestCase {
*/
public void testResultAsCellScanner() throws IOException {
Cell [] cells = genKVs(row, family, value, 1, 10);
Arrays.sort(cells, CellComparator.COMPARATOR);
Arrays.sort(cells, CellComparatorImpl.COMPARATOR);
Result r = Result.create(cells);
assertSame(r, cells);
// Assert I run over same result multiple times.
@ -93,7 +93,7 @@ public class TestResult extends TestCase {
public void testBasicGetColumn() throws Exception {
KeyValue [] kvs = genKVs(row, family, value, 1, 100);
Arrays.sort(kvs, CellComparator.COMPARATOR);
Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
Result r = Result.create(kvs);
@ -132,7 +132,7 @@ public class TestResult extends TestCase {
System.arraycopy(kvs1, 0, kvs, 0, kvs1.length);
System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length);
Arrays.sort(kvs, CellComparator.COMPARATOR);
Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
Result r = Result.create(kvs);
for (int i = 0; i < 100; ++i) {
@ -149,7 +149,7 @@ public class TestResult extends TestCase {
public void testBasicGetValue() throws Exception {
KeyValue [] kvs = genKVs(row, family, value, 1, 100);
Arrays.sort(kvs, CellComparator.COMPARATOR);
Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
Result r = Result.create(kvs);
@ -169,7 +169,7 @@ public class TestResult extends TestCase {
System.arraycopy(kvs1, 0, kvs, 0, kvs1.length);
System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length);
Arrays.sort(kvs, CellComparator.COMPARATOR);
Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
Result r = Result.create(kvs);
for (int i = 0; i < 100; ++i) {
@ -183,7 +183,7 @@ public class TestResult extends TestCase {
public void testBasicLoadValue() throws Exception {
KeyValue [] kvs = genKVs(row, family, value, 1, 100);
Arrays.sort(kvs, CellComparator.COMPARATOR);
Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
Result r = Result.create(kvs);
ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024);
@ -208,7 +208,7 @@ public class TestResult extends TestCase {
System.arraycopy(kvs1, 0, kvs, 0, kvs1.length);
System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length);
Arrays.sort(kvs, CellComparator.COMPARATOR);
Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024);
@ -291,7 +291,7 @@ public class TestResult extends TestCase {
KeyValue [] kvs = genKVs(Bytes.toBytes(rowSB.toString()), family,
Bytes.toBytes(valueSB.toString()), 1, n);
Arrays.sort(kvs, CellComparator.COMPARATOR);
Arrays.sort(kvs, CellComparatorImpl.COMPARATOR);
ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024);
Result r = Result.create(kvs);

View File

@ -31,7 +31,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
@ -154,7 +154,7 @@ public class TestDependentColumnFilter {
for (boolean done = true; done; i++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new Cell[results.size()]),
CellComparator.COMPARATOR);
CellComparatorImpl.COMPARATOR);
LOG.info("counter=" + i + ", " + results);
if (results.isEmpty()) break;
cells += results.size();

View File

@ -31,7 +31,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -1667,7 +1667,7 @@ public class TestFilter {
for (boolean done = true; done; i++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new Cell[results.size()]),
CellComparator.COMPARATOR);
CellComparatorImpl.COMPARATOR);
LOG.info("counter=" + i + ", " + results);
if (results.isEmpty()) break;
assertTrue("Scanned too many rows! Only expected " + expectedRows +
@ -1689,7 +1689,7 @@ public class TestFilter {
for (boolean done = true; done; i++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new Cell[results.size()]),
CellComparator.COMPARATOR);
CellComparatorImpl.COMPARATOR);
LOG.info("counter=" + i + ", " + results);
if(results.isEmpty()) break;
assertTrue("Scanned too many rows! Only expected " + expectedRows +
@ -1711,7 +1711,7 @@ public class TestFilter {
for (boolean done = true; done; row++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new Cell[results.size()]),
CellComparator.COMPARATOR);
CellComparatorImpl.COMPARATOR);
if(results.isEmpty()) break;
assertTrue("Scanned too many keys! Only expected " + kvs.length +
" total but already scanned " + (results.size() + idx) +
@ -1742,7 +1742,7 @@ public class TestFilter {
for (boolean more = true; more; row++) {
more = scanner.next(results);
Arrays.sort(results.toArray(new Cell[results.size()]),
CellComparator.COMPARATOR);
CellComparatorImpl.COMPARATOR);
if(results.isEmpty()) break;
assertTrue("Scanned too many keys! Only expected " + kvs.length +
" total but already scanned " + (results.size() + idx) +

View File

@ -25,7 +25,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@ -500,8 +500,8 @@ public class TestFilterList {
// Should take the min if given two hints
FilterList filterList = new FilterList(Operator.MUST_PASS_ONE,
Arrays.asList(new Filter [] { filterMinHint, filterMaxHint } ));
assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),
minKeyValue));
assertEquals(0,
CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), minKeyValue));
// Should have no hint if any filter has no hint
filterList = new FilterList(Operator.MUST_PASS_ONE,
@ -514,9 +514,9 @@ public class TestFilterList {
// Should give max hint if its the only one
filterList = new FilterList(Operator.MUST_PASS_ONE,
Arrays.asList(new Filter [] { filterMaxHint, filterMaxHint } ));
assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),
maxKeyValue));
Arrays.asList(new Filter[] { filterMaxHint, filterMaxHint }));
assertEquals(0,
CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
// MUST PASS ALL
@ -524,32 +524,31 @@ public class TestFilterList {
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter [] { filterMinHint, filterMaxHint } ));
filterList.filterKeyValue(null);
assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),
maxKeyValue));
assertEquals(0,
CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter [] { filterMaxHint, filterMinHint } ));
filterList.filterKeyValue(null);
assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),
maxKeyValue));
assertEquals(0,
CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
// Should have first hint even if a filter has no hint
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(
new Filter [] { filterNoHint, filterMinHint, filterMaxHint } ));
Arrays.asList(new Filter[] { filterNoHint, filterMinHint, filterMaxHint }));
filterList.filterKeyValue(null);
assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),
maxKeyValue));
assertEquals(0,
CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter [] { filterNoHint, filterMaxHint } ));
Arrays.asList(new Filter[] { filterNoHint, filterMaxHint }));
filterList.filterKeyValue(null);
assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),
maxKeyValue));
assertEquals(0,
CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), maxKeyValue));
filterList = new FilterList(Operator.MUST_PASS_ALL,
Arrays.asList(new Filter [] { filterNoHint, filterMinHint } ));
Arrays.asList(new Filter[] { filterNoHint, filterMinHint }));
filterList.filterKeyValue(null);
assertEquals(0, CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),
minKeyValue));
assertEquals(0,
CellComparatorImpl.COMPARATOR.compare(filterList.getNextCellHint(null), minKeyValue));
}
/**

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.filter;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@ -69,8 +69,8 @@ public class TestSingleColumnValueExcludeFilter {
filter.filterRowCells(kvs);
assertEquals("resultSize", kvs.size(), 2);
assertTrue("leftKV1", CellComparator.COMPARATOR.compare(kvs.get(0), kv) == 0);
assertTrue("leftKV2", CellComparator.COMPARATOR.compare(kvs.get(1), kv) == 0);
assertTrue("leftKV1", CellComparatorImpl.COMPARATOR.compare(kvs.get(0), kv) == 0);
assertTrue("leftKV2", CellComparatorImpl.COMPARATOR.compare(kvs.get(1), kv) == 0);
assertFalse("allRemainingWhenMatch", filter.filterAllRemaining());
// A 'mismatch' situation

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
@ -164,7 +164,7 @@ public class TestHalfStoreFileReader {
// Ugly code to get the item before the midkey
KeyValue beforeMidKey = null;
for (KeyValue item : items) {
if (CellComparator.COMPARATOR.compare(item, midKV) >= 0) {
if (CellComparatorImpl.COMPARATOR.compare(item, midKV) >= 0) {
break;
}
beforeMidKey = item;

View File

@ -35,7 +35,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@ -209,7 +209,7 @@ public class TestDataBlockEncoders {
.withIncludesTags(includesTags)
.withCompression(Compression.Algorithm.NONE)
.build();
DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
encoder.newDataBlockDecodingContext(meta));
seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
encodedSeekers.add(seeker);
@ -285,14 +285,15 @@ public class TestDataBlockEncoders {
.withIncludesTags(includesTags)
.withCompression(Compression.Algorithm.NONE)
.build();
DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
encoder.newDataBlockDecodingContext(meta));
seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
int i = 0;
do {
KeyValue expectedKeyValue = sampleKv.get(i);
Cell cell = seeker.getCell();
if (CellComparator.COMPARATOR.compareKeyIgnoresMvcc(expectedKeyValue, cell) != 0) {
if (CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, expectedKeyValue,
cell) != 0) {
int commonPrefix = CellUtil
.findCommonPrefixInFlatKey(expectedKeyValue, cell, false, true);
fail(String.format("next() produces wrong results "
@ -326,7 +327,7 @@ public class TestDataBlockEncoders {
getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
Cell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer));
KeyValue firstKv = sampleKv.get(0);
if (0 != CellComparator.COMPARATOR.compareKeyIgnoresMvcc(key, firstKv)) {
if (0 != CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) {
int commonPrefix = CellUtil.findCommonPrefixInFlatKey(key, firstKv, false, true);
fail(String.format("Bug in '%s' commonPrefix %d", encoder.toString(), commonPrefix));
}

View File

@ -36,7 +36,7 @@ import java.util.concurrent.ConcurrentSkipListSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@ -73,7 +73,7 @@ public class TestPrefixTreeEncoding {
private static final int NUM_COLS_PER_ROW = 20;
private int numBatchesWritten = 0;
private ConcurrentSkipListSet<Cell> kvset = new ConcurrentSkipListSet<>(CellComparator.COMPARATOR);
private ConcurrentSkipListSet<Cell> kvset = new ConcurrentSkipListSet<>(CellComparatorImpl.COMPARATOR);
private static boolean formatRowNum = false;
@ -113,7 +113,7 @@ public class TestPrefixTreeEncoding {
DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
generateFixedTestData(kvset, batchId, false, includesTag, encoder, blkEncodingCtx,
userDataStream);
EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
encoder.newDataBlockDecodingContext(meta));
byte[] onDiskBytes = baosInMemory.toByteArray();
ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
@ -156,7 +156,7 @@ public class TestPrefixTreeEncoding {
DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
generateRandomTestData(kvset, numBatchesWritten++, includesTag, encoder, blkEncodingCtx,
userDataStream);
EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
encoder.newDataBlockDecodingContext(meta));
byte[] onDiskBytes = baosInMemory.toByteArray();
ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
@ -166,7 +166,7 @@ public class TestPrefixTreeEncoding {
do {
Cell currentKV = seeker.getCell();
System.out.println(currentKV);
if (previousKV != null && CellComparator.COMPARATOR.compare(currentKV, previousKV) < 0) {
if (previousKV != null && CellComparatorImpl.COMPARATOR.compare(currentKV, previousKV) < 0) {
dumpInputKVSet();
fail("Current kv " + currentKV + " is smaller than previous keyvalue " + previousKV);
}
@ -194,7 +194,7 @@ public class TestPrefixTreeEncoding {
HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
generateRandomTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
encoder.newDataBlockDecodingContext(meta));
byte[] onDiskBytes = baosInMemory.toByteArray();
ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
@ -217,7 +217,7 @@ public class TestPrefixTreeEncoding {
ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
generateFixedTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
encoder.newDataBlockDecodingContext(meta));
byte[] onDiskBytes = baosInMemory.toByteArray();
ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
@ -244,7 +244,7 @@ public class TestPrefixTreeEncoding {
fail("Get error result after seeking " + firstOnRow);
}
if (hasMoreOfEncodeScanner) {
if (CellComparator.COMPARATOR.compare(encodeSeeker.getCell(),
if (CellComparatorImpl.COMPARATOR.compare(encodeSeeker.getCell(),
collectionScanner.peek()) != 0) {
dumpInputKVSet();
fail("Expected " + collectionScanner.peek() + " actual "

View File

@ -25,7 +25,7 @@ import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@ -284,7 +284,7 @@ public class TestSeekToBlockWithEncoders {
HFILEBLOCK_DUMMY_HEADER, meta);
ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
encodingContext, this.useOffheapData);
DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
encoder.newDataBlockDecodingContext(meta));
seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
encodedSeekers.add(seeker);

View File

@ -40,7 +40,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
@ -372,7 +372,7 @@ public class TestCacheOnWrite {
.withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
.withIncludesTags(useTags).build();
StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
.withOutputDir(storeFileParentDir).withComparator(CellComparator.COMPARATOR)
.withOutputDir(storeFileParentDir).withComparator(CellComparatorImpl.COMPARATOR)
.withFileContext(meta)
.withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build();
byte[] cf = Bytes.toBytes("fam");

View File

@ -97,7 +97,7 @@ public class TestFixedFileTrailer {
t.setLastDataBlockOffset(291);
t.setNumDataIndexLevels(3);
t.setComparatorClass(CellComparator.COMPARATOR.getClass());
t.setComparatorClass(CellComparatorImpl.COMPARATOR.getClass());
t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic.
t.setUncompressedDataIndexSize(827398717L); // Something random.
@ -178,7 +178,7 @@ public class TestFixedFileTrailer {
t.setEntryCount(((long) Integer.MAX_VALUE) + 1);
t.setLastDataBlockOffset(291);
t.setNumDataIndexLevels(3);
t.setComparatorClass(CellComparator.COMPARATOR.getClass());
t.setComparatorClass(CellComparatorImpl.COMPARATOR.getClass());
t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic.
t.setUncompressedDataIndexSize(827398717L); // Something random.
t.setLoadOnOpenOffset(128);
@ -276,7 +276,7 @@ public class TestFixedFileTrailer {
assertEquals(expected.getFirstDataBlockOffset(),
loaded.getFirstDataBlockOffset());
assertTrue(
expected.createComparator() instanceof CellComparator);
expected.createComparator() instanceof CellComparatorImpl);
assertEquals(expected.getUncompressedDataIndexSize(),
loaded.getUncompressedDataIndexSize());
}

View File

@ -41,7 +41,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -117,7 +117,7 @@ public class TestHFile {
HFileContext meta = new HFileContextBuilder().withBlockSize(64 * 1024).build();
StoreFileWriter sfw =
new StoreFileWriter.Builder(conf, fs).withOutputDir(storeFileParentDir)
.withComparator(CellComparator.COMPARATOR).withFileContext(meta).build();
.withComparator(CellComparatorImpl.COMPARATOR).withFileContext(meta).build();
final int rowLen = 32;
Random RNG = new Random();
@ -319,7 +319,7 @@ public class TestHFile {
Writer writer = HFile.getWriterFactory(conf, cacheConf)
.withOutputStream(fout)
.withFileContext(meta)
.withComparator(CellComparator.COMPARATOR)
.withComparator(CellComparatorImpl.COMPARATOR)
.create();
LOG.info(writer);
writeRecords(writer, useTags);
@ -486,72 +486,72 @@ public class TestHFile {
9,
KeyValue.Type.Maximum.getCode(),
HConstants.EMPTY_BYTE_ARRAY);
Cell mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) <= 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) == 0);
Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0);
}
@Test
public void testGetShortMidpoint() {
Cell left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
Cell right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
Cell mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) <= 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);
Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
right = CellUtil.createCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a"));
mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);
mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);
mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
right = CellUtil.createCell(Bytes.toBytes("bbbbbbb"), Bytes.toBytes("a"), Bytes.toBytes("a"));
mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) < 0);
mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0);
assertEquals(1, mid.getRowLength());
left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("a"));
mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);
mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("b"));
mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) < 0);
mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0);
assertEquals(2, mid.getFamilyLength());
left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaaa"));
mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) < 0);
mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0);
assertEquals(2, mid.getQualifierLength());
left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b"));
mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);
mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0);
assertEquals(1, mid.getQualifierLength());
// Assert that if meta comparator, it returns the right cell -- i.e. no
// optimization done.
left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
mid = HFileWriterImpl.getMidpoint(CellComparator.META_COMPARATOR, left, right);
assertTrue(CellComparator.META_COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
assertTrue(CellComparator.META_COMPARATOR.compareKeyIgnoresMvcc(mid, right) == 0);
mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.META_COMPARATOR, left, right);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.META_COMPARATOR, left, mid) < 0);
assertTrue(CellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.META_COMPARATOR, mid, right) == 0);
/**
* See HBASE-7845
@ -562,7 +562,7 @@ public class TestHFile {
byte[] family = Bytes.toBytes("family");
byte[] qualA = Bytes.toBytes("qfA");
byte[] qualB = Bytes.toBytes("qfB");
final CellComparator keyComparator = CellComparator.COMPARATOR;
final CellComparatorImpl keyComparator = CellComparatorImpl.COMPARATOR;
// verify that faked shorter rowkey could be generated
long ts = 5;
KeyValue kv1 = new KeyValue(Bytes.toBytes("the quick brown fox"), family, qualA, ts, Type.Put);
@ -601,7 +601,7 @@ public class TestHFile {
assertTrue(newKey.getTypeByte() == Type.Maximum.getCode());
// verify metaKeyComparator's getShortMidpointKey output
final CellComparator metaKeyComparator = CellComparator.META_COMPARATOR;
final CellComparatorImpl metaKeyComparator = CellComparatorImpl.META_COMPARATOR;
kv1 = new KeyValue(Bytes.toBytes("ilovehbase123"), family, qualA, 5, Type.Put);
kv2 = new KeyValue(Bytes.toBytes("ilovehbase234"), family, qualA, 0, Type.Put);
newKey = HFileWriterImpl.getMidpoint(metaKeyComparator, kv1, kv2);

View File

@ -46,7 +46,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@ -175,7 +175,7 @@ public class TestHFileBlock {
// sort it and write to stream
int totalSize = 0;
Collections.sort(keyValues, CellComparator.COMPARATOR);
Collections.sort(keyValues, CellComparatorImpl.COMPARATOR);
for (KeyValue kv : keyValues) {
totalSize += kv.getLength();

View File

@ -42,7 +42,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -213,7 +213,7 @@ public class TestHFileBlockIndex {
BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
HFileBlockIndex.BlockIndexReader indexReader =
new HFileBlockIndex.CellBasedKeyBlockIndexReader(
CellComparator.COMPARATOR, numLevels, brw);
CellComparatorImpl.COMPARATOR, numLevels, brw);
indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);
@ -230,7 +230,7 @@ public class TestHFileBlockIndex {
HFileBlock b =
indexReader.seekToDataBlock(keyOnlyKey, null, true,
true, false, null);
if (CellComparator.COMPARATOR.compare(keyOnlyKey, firstKeyInFile,
if (CellUtil.compare(CellComparatorImpl.COMPARATOR, keyOnlyKey, firstKeyInFile,
0, firstKeyInFile.length) < 0) {
assertTrue(b == null);
++i;
@ -375,7 +375,7 @@ public class TestHFileBlockIndex {
// Make sure the keys are increasing.
for (int i = 0; i < keys.size() - 1; ++i)
assertTrue(CellComparator.COMPARATOR.compare(
assertTrue(CellComparatorImpl.COMPARATOR.compare(
new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length),
new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0);
@ -414,7 +414,7 @@ public class TestHFileBlockIndex {
KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue(
arrayHoldingKey, searchKey.length / 2, searchKey.length);
int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell,
new MultiByteBuff(nonRootIndex), CellComparator.COMPARATOR);
new MultiByteBuff(nonRootIndex), CellComparatorImpl.COMPARATOR);
String lookupFailureMsg = "Failed to look up key #" + i + " ("
+ Bytes.toStringBinary(searchKey) + ")";
@ -440,7 +440,7 @@ public class TestHFileBlockIndex {
// higher-level API function.s
boolean locateBlockResult =
(BlockIndexReader.locateNonRootIndexEntry(new MultiByteBuff(nonRootIndex), cell,
CellComparator.COMPARATOR) != -1);
CellComparatorImpl.COMPARATOR) != -1);
if (i == 0) {
assertFalse(locateBlockResult);
@ -636,7 +636,7 @@ public class TestHFileBlockIndex {
values[i] = CellUtil.cloneValue(kv);
keyStrSet.add(Bytes.toStringBinary(k));
if (i > 0) {
assertTrue((CellComparator.COMPARATOR.compare(kv, keys[i - 1],
assertTrue((CellUtil.compare(CellComparatorImpl.COMPARATOR, kv, keys[i - 1],
0, keys[i - 1].length)) > 0);
}
}

View File

@ -42,7 +42,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
@ -137,7 +137,7 @@ public class TestHFileSeek extends TestCase {
Writer writer = HFile.getWriterFactoryNoCache(conf)
.withOutputStream(fout)
.withFileContext(context)
.withComparator(CellComparator.COMPARATOR)
.withComparator(CellComparatorImpl.COMPARATOR)
.create();
try {
BytesWritable key = new BytesWritable();

View File

@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@ -125,7 +126,7 @@ public class TestHFileWriterV3 {
HFile.Writer writer = new HFile.WriterFactory(conf, new CacheConfig(conf))
.withPath(fs, hfilePath)
.withFileContext(context)
.withComparator(CellComparator.COMPARATOR)
.withComparator(CellComparatorImpl.COMPARATOR)
.create();
Random rand = new Random(9713312); // Just a fixed seed.

View File

@ -25,7 +25,7 @@ import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
@ -111,7 +111,7 @@ public class TestPrefetch {
.build();
StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
.withOutputDir(storeFileParentDir)
.withComparator(CellComparator.COMPARATOR)
.withComparator(CellComparatorImpl.COMPARATOR)
.withFileContext(meta)
.build();

View File

@ -26,7 +26,7 @@ import java.util.List;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@ -66,7 +66,7 @@ public class TestReseekTo {
.withOutputStream(fout)
.withFileContext(context)
// NOTE: This test is dependent on this deprecated nonstandard comparator
.withComparator(CellComparator.COMPARATOR)
.withComparator(CellComparatorImpl.COMPARATOR)
.create();
int numberOfKeys = 1000;

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@ -122,7 +122,7 @@ public class TestSeekTo {
Configuration conf = TEST_UTIL.getConfiguration();
HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout)
.withFileContext(context)
.withComparator(CellComparator.COMPARATOR).create();
.withComparator(CellComparatorImpl.COMPARATOR).create();
// 4 bytes * 3 * 2 for each key/value +
// 3 for keys, 15 for values = 42 (woot)
writer.append(toKV("c", tagUsage));

View File

@ -47,7 +47,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
@ -882,7 +882,7 @@ public class TestPartitionedMobCompactor {
false, true, false, false, HConstants.LATEST_TIMESTAMP));
long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
long ttl = HStore.determineTTLFromFamily(hcd);
ScanInfo scanInfo = new ScanInfo(conf, hcd, ttl, timeToPurgeDeletes, CellComparator.COMPARATOR);
ScanInfo scanInfo = new ScanInfo(conf, hcd, ttl, timeToPurgeDeletes, CellComparatorImpl.COMPARATOR);
StoreScanner scanner = new StoreScanner(scanInfo, ScanType.COMPACT_RETAIN_DELETES, scanners);
List<Cell> results = new ArrayList<>();
boolean hasMore = true;

View File

@ -25,7 +25,7 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
@ -334,6 +334,6 @@ public class TestProtobufUtil {
ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength());
CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV);
Cell newOffheapKV = ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell);
assertTrue(CellComparator.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
}
}

View File

@ -23,7 +23,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.CollectionBackedScanner;
@ -34,14 +34,14 @@ import org.apache.hadoop.hbase.util.CollectionBackedScanner;
* to be a store file scanner.
*/
public class KeyValueScanFixture extends CollectionBackedScanner {
public KeyValueScanFixture(CellComparator comparator, Cell... cells) {
public KeyValueScanFixture(CellComparatorImpl comparator, Cell... cells) {
super(comparator, cells);
}
public static List<KeyValueScanner> scanFixture(KeyValue[] ... kvArrays) {
ArrayList<KeyValueScanner> scanners = new ArrayList<>();
for (KeyValue [] kvs : kvArrays) {
scanners.add(new KeyValueScanFixture(CellComparator.COMPARATOR, kvs));
scanners.add(new KeyValueScanFixture(CellComparatorImpl.COMPARATOR, kvs));
}
return scanners;
}

View File

@ -29,7 +29,7 @@ import java.util.SortedSet;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@ -112,9 +112,9 @@ public class TestCellFlatSet extends TestCase {
lowerOuterCell = new KeyValue(Bytes.toBytes(10), f, q, 10, v);
upperOuterCell = new KeyValue(Bytes.toBytes(50), f, q, 10, v);
ascCells = new Cell[] {kv1,kv2,kv3,kv4};
ascCbOnHeap = new CellArrayMap(CellComparator.COMPARATOR,ascCells,0,NUM_OF_CELLS,false);
ascCbOnHeap = new CellArrayMap(CellComparatorImpl.COMPARATOR,ascCells,0,NUM_OF_CELLS,false);
descCells = new Cell[] {kv4,kv3,kv2,kv1};
descCbOnHeap = new CellArrayMap(CellComparator.COMPARATOR,descCells,0,NUM_OF_CELLS,true);
descCbOnHeap = new CellArrayMap(CellComparatorImpl.COMPARATOR,descCells,0,NUM_OF_CELLS,true);
CONF.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
CONF.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
@ -315,6 +315,6 @@ public class TestCellFlatSet extends TestCase {
idxOffset = ByteBufferUtils.putLong(idxBuffer, idxOffset, kv.getSequenceId()); // seqId
}
return new CellChunkMap(CellComparator.COMPARATOR,chunkArray,0,NUM_OF_CELLS,!asc);
return new CellChunkMap(CellComparatorImpl.COMPARATOR,chunkArray,0,NUM_OF_CELLS,!asc);
}
}

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
import junit.framework.TestCase;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@ -34,7 +34,7 @@ import java.util.SortedSet;
@Category({RegionServerTests.class, SmallTests.class})
public class TestCellSkipListSet extends TestCase {
private final CellSet csls =
new CellSet(CellComparator.COMPARATOR);
new CellSet(CellComparatorImpl.COMPARATOR);
protected void setUp() throws Exception {
super.setUp();

Some files were not shown because too many files have changed in this diff Show More