HBASE-12047 Avoid usage of KeyValueUtil#ensureKeyValue in simple cases.

anoopsjohn 2014-09-23 16:42:01 +05:30
parent 411991cf1d
commit 791a03b40f
15 changed files with 124 additions and 151 deletions
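
Every hunk below applies the same pattern: instead of converting a Cell into a KeyValue with KeyValueUtil#ensureKeyValue (which may deep-copy the cell when it is not already a KeyValue) just to reach KeyValue-only getters, the code reads the Cell directly through the Cell interface and the CellUtil/KeyValueUtil helpers. A minimal before/after sketch of the pattern (the class and method names here are illustrative, not part of the patch):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    public class EnsureKeyValueSketch {

      // Before: materialize a KeyValue only to call its getters. If the Cell
      // is not already a KeyValue, ensureKeyValue copies the whole cell.
      static byte[] familyBefore(Cell cell) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        return kv.getFamily();
      }

      // After: copy just the family bytes straight out of the Cell's backing
      // array; no intermediate KeyValue is created.
      static byte[] familyAfter(Cell cell) {
        return CellUtil.cloneFamily(cell);
      }
    }

The same substitution covers the other call sites in this patch: kv.getLength() becomes KeyValueUtil.length(cell), and row/qualifier/value copies go through CellUtil.cloneRow, cloneQualifier and cloneValue.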

File: Append.java

@@ -26,8 +26,8 @@ import java.util.UUID;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.visibility.CellVisibility;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -121,14 +121,13 @@ public class Append extends Mutation {
   @SuppressWarnings("unchecked")
   public Append add(final Cell cell) {
     // Presume it is KeyValue for now.
-    KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-    byte [] family = kv.getFamily();
+    byte [] family = CellUtil.cloneFamily(cell);
     List<Cell> list = this.familyMap.get(family);
     if (list == null) {
       list = new ArrayList<Cell>();
     }
     // find where the new entry should be placed in the List
-    list.add(kv);
+    list.add(cell);
     this.familyMap.put(family, list);
     return this;
   }

File: HTable.java

@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
@@ -1449,9 +1448,7 @@ public class HTable implements HTableInterface, RegionLocator {
     if (maxKeyValueSize > 0) {
       for (List<Cell> list : put.getFamilyCellMap().values()) {
         for (Cell cell : list) {
-          // KeyValue v1 expectation. Cast for now.
-          KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-          if (kv.getLength() > maxKeyValueSize) {
+          if (KeyValueUtil.length(cell) > maxKeyValueSize) {
             throw new IllegalArgumentException("KeyValue size too large");
           }
         }

File: Increment.java

@@ -28,6 +28,7 @@ import java.util.UUID;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.io.TimeRange;
@@ -97,17 +98,16 @@ public class Increment extends Mutation implements Comparable<Row> {
    */
   @SuppressWarnings("unchecked")
   public Increment add(Cell cell) throws IOException{
-    KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-    byte [] family = kv.getFamily();
+    byte [] family = CellUtil.cloneFamily(cell);
     List<Cell> list = getCellList(family);
     //Checking that the row of the kv is the same as the put
     int res = Bytes.compareTo(this.row, 0, row.length,
-        kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
+        cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
     if (res != 0) {
-      throw new WrongRowIOException("The row in " + kv.toString() +
+      throw new WrongRowIOException("The row in " + cell +
         " doesn't match the original one " + Bytes.toStringBinary(this.row));
     }
-    list.add(kv);
+    list.add(cell);
     familyMap.put(family, list);
     return this;
   }
@@ -133,7 +133,7 @@ public class Increment extends Mutation implements Comparable<Row> {
     List<Cell> list = getCellList(family);
     KeyValue kv = createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
     list.add(kv);
-    familyMap.put(kv.getFamily(), list);
+    familyMap.put(CellUtil.cloneFamily(kv), list);
     return this;
   }
@@ -197,9 +197,8 @@ public class Increment extends Mutation implements Comparable<Row> {
     for (Map.Entry<byte [], List<Cell>> entry: map.entrySet()) {
       NavigableMap<byte [], Long> longs = new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
       for (Cell cell: entry.getValue()) {
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-        longs.put(kv.getQualifier(),
-            Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
+        longs.put(CellUtil.cloneQualifier(cell),
+            Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
       }
       results.put(entry.getKey(), longs);
     }

File: ScannerCallable.java

@@ -28,11 +28,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
@@ -292,9 +292,8 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
       }
       long resultSize = 0;
       for (Result rr : rrs) {
-        for (Cell kv : rr.rawCells()) {
-          // TODO add getLength to Cell/use CellUtil#estimatedSizeOf
-          resultSize += KeyValueUtil.ensureKeyValue(kv).getLength();
+        for (Cell cell : rr.rawCells()) {
+          resultSize += CellUtil.estimatedLengthOf(cell);
         }
       }
       this.scanMetrics.countOfBytesInResults.addAndGet(resultSize);

File: FuzzyRowFilter.java

@@ -119,7 +119,7 @@ public class FuzzyRowFilter extends FilterBase {
       // TODO: is there a better way than throw exception? (stop the scanner?)
       throw new IllegalStateException("No next row key that satisfies fuzzy exists when"
           + " getNextKeyHint() is invoked." + " Filter: " + this.toString() + " currentKV: "
-          + KeyValueUtil.ensureKeyValue(currentCell).toString());
+          + currentCell);
     }
     return KeyValueUtil.createFirstOnRow(nextRowKey);

File: ProtobufUtil.java

@@ -673,7 +673,7 @@ public final class ProtobufUtil {
           delete =
               new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp);
         }
-        delete.addDeleteMarker(KeyValueUtil.ensureKeyValue(cell));
+        delete.addDeleteMarker(cell);
       }
     } else {
       delete = new Delete(row, timestamp);
@@ -737,7 +737,7 @@ public final class ProtobufUtil {
         if (append == null) {
           append = new Append(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
         }
-        append.add(KeyValueUtil.ensureKeyValue(cell));
+        append.add(cell);
       }
     } else {
       append = new Append(row);
@@ -816,7 +816,7 @@ public final class ProtobufUtil {
         if (increment == null) {
          increment = new Increment(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
         }
-        increment.add(KeyValueUtil.ensureKeyValue(cell));
+        increment.add(cell);
       }
     } else {
       increment = new Increment(row);
@@ -1126,14 +1126,13 @@ public final class ProtobufUtil {
       List<Cell> values = family.getValue();
       if (values != null && values.size() > 0) {
         for (Cell cell: values) {
-          KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
           valueBuilder.setQualifier(ByteStringer.wrap(
-            kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()));
+            cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()));
           valueBuilder.setValue(ByteStringer.wrap(
-            kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
-          if (kv.getTagsLength() > 0) {
-            valueBuilder.setTags(ByteStringer.wrap(kv.getTagsArray(),
-              kv.getTagsOffset(), kv.getTagsLength()));
+            cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+          if (cell.getTagsLength() > 0) {
+            valueBuilder.setTags(ByteStringer.wrap(cell.getTagsArray(),
+              cell.getTagsOffset(), cell.getTagsLength()));
           }
           columnBuilder.addQualifierValue(valueBuilder.build());
         }
@@ -1189,18 +1188,17 @@ public final class ProtobufUtil {
       columnBuilder.clear();
       columnBuilder.setFamily(ByteStringer.wrap(family.getKey()));
       for (Cell cell: family.getValue()) {
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
         valueBuilder.setQualifier(ByteStringer.wrap(
-          kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()));
+          cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()));
         valueBuilder.setValue(ByteStringer.wrap(
-          kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
-        valueBuilder.setTimestamp(kv.getTimestamp());
+          cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+        valueBuilder.setTimestamp(cell.getTimestamp());
         if(cell.getTagsLength() > 0) {
-          valueBuilder.setTags(ByteStringer.wrap(kv.getTagsArray(), kv.getTagsOffset(),
-            kv.getTagsLength()));
+          valueBuilder.setTags(ByteStringer.wrap(cell.getTagsArray(), cell.getTagsOffset(),
+            cell.getTagsLength()));
         }
-        if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(kv))) {
-          KeyValue.Type keyValueType = KeyValue.Type.codeToType(kv.getType());
+        if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) {
+          KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte());
           valueBuilder.setDeleteType(toDeleteType(keyValueType));
         }
         columnBuilder.addQualifierValue(valueBuilder.build());

File: BufferedDataBlockEncoder.java

@@ -24,6 +24,7 @@ import java.nio.ByteBuffer;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.SettableSequenceId;
@@ -475,34 +476,31 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {

     @Override
     @Deprecated
     public byte[] getValue() {
-      throw new UnsupportedOperationException("getValue() not supported");
+      return CellUtil.cloneValue(this);
     }

     @Override
     @Deprecated
     public byte[] getFamily() {
-      throw new UnsupportedOperationException("getFamily() not supported");
+      return CellUtil.cloneFamily(this);
     }

     @Override
     @Deprecated
     public byte[] getQualifier() {
-      throw new UnsupportedOperationException("getQualifier() not supported");
+      return CellUtil.cloneQualifier(this);
     }

     @Override
     @Deprecated
     public byte[] getRow() {
-      throw new UnsupportedOperationException("getRow() not supported");
+      return CellUtil.cloneRow(this);
     }

     @Override
     public String toString() {
-      KeyValue kv = KeyValueUtil.copyToNewKeyValue(this);
-      if (kv == null) {
-        return "null";
-      }
-      return kv.toString();
+      return KeyValue.keyToString(this.keyOnlyBuffer, 0, KeyValueUtil.keyLength(this)) + "/vlen="
+          + getValueLength() + "/seqid=" + seqId;
     }

     @Override
@Override

File: ClientSideRegionScanner.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -84,9 +85,8 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
     Result result = Result.create(values);
     if (this.scanMetrics != null) {
       long resultSize = 0;
-      for (Cell kv : values) {
-        // TODO add getLength to Cell/use CellUtil#estimatedSizeOf
-        resultSize += KeyValueUtil.ensureKeyValue(kv).getLength();
+      for (Cell cell : values) {
+        resultSize += CellUtil.estimatedLengthOf(cell);
       }
       this.scanMetrics.countOfBytesInResults.addAndGet(resultSize);
     }

File: HRegion.java

@@ -145,10 +145,8 @@ import org.apache.hadoop.hbase.util.HashedBytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.zookeeper.KeeperException;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -3102,7 +3100,7 @@ public class HRegion implements HeapSize { // , Writable{
           CellUtil.setSequenceId(cell, mvccNum);
           Pair<Long, Cell> ret = store.add(cell);
           size += ret.getFirst();
-          memstoreCells.add(KeyValueUtil.ensureKeyValue(ret.getSecond()));
+          memstoreCells.add(ret.getSecond());
         }
       }
@@ -3118,7 +3116,7 @@ public class HRegion implements HeapSize { // , Writable{
     int kvsRolledback = 0;
     for (Cell cell : memstoreCells) {
-      byte[] family = cell.getFamily();
+      byte[] family = CellUtil.cloneFamily(cell);
       Store store = getStore(family);
       store.rollback(cell);
       kvsRolledback++;
@@ -3170,8 +3168,8 @@ public class HRegion implements HeapSize { // , Writable{
     for (List<Cell> kvs : familyMap.values()) {
       for (Cell cell : kvs) {
         // see if the user-side TS is out of range. latest = server-side
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-        if (!kv.isLatestTimestamp() && kv.getTimestamp() > maxTs) {
+        long ts = cell.getTimestamp();
+        if (ts != HConstants.LATEST_TIMESTAMP && ts > maxTs) {
           throw new FailedSanityCheckException("Timestamp for KV out of range "
               + cell + " (too.new=" + timestampSlop + ")");
         }
@@ -4935,8 +4933,8 @@ public class HRegion implements HeapSize { // , Writable{
     // do after lock
     if (this.metricsRegion != null) {
       long totalSize = 0l;
-      for (Cell kv : results) {
-        totalSize += KeyValueUtil.ensureKeyValue(kv).getLength();
+      for (Cell cell : results) {
+        totalSize += CellUtil.estimatedLengthOf(cell);
       }
       this.metricsRegion.updateGet(totalSize);
     }
@@ -5123,8 +5121,8 @@ public class HRegion implements HeapSize { // , Writable{
               processor.getRowsToLock().iterator().next()) + "...");
           for (Mutation m : mutations) {
             for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) {
-              KeyValue kv = KeyValueUtil.ensureKeyValue(cellScanner.current());
-              getStore(kv).rollback(kv);
+              Cell cell = cellScanner.current();
+              getStore(cell).rollback(cell);
             }
           }
         }
@@ -5283,60 +5281,61 @@ public class HRegion implements HeapSize { // , Writable{
           // Would be nice if KeyValue had scatter/gather logic
           int idx = 0;
           for (Cell cell : family.getValue()) {
-            KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-            KeyValue newKV;
-            KeyValue oldKv = null;
+            Cell newCell;
+            Cell oldCell = null;
             if (idx < results.size()
-                && CellUtil.matchingQualifier(results.get(idx),kv)) {
-              oldKv = KeyValueUtil.ensureKeyValue(results.get(idx));
+                && CellUtil.matchingQualifier(results.get(idx), cell)) {
+              oldCell = results.get(idx);
               // allocate an empty kv once
-              newKV = new KeyValue(row.length, kv.getFamilyLength(),
-                  kv.getQualifierLength(), now, KeyValue.Type.Put,
-                  oldKv.getValueLength() + kv.getValueLength(),
-                  oldKv.getTagsLength() + kv.getTagsLength());
+              newCell = new KeyValue(row.length, cell.getFamilyLength(),
+                  cell.getQualifierLength(), now, KeyValue.Type.Put,
+                  oldCell.getValueLength() + cell.getValueLength(),
+                  oldCell.getTagsLength() + cell.getTagsLength());
               // copy in the value
-              System.arraycopy(oldKv.getValueArray(), oldKv.getValueOffset(),
-                  newKV.getValueArray(), newKV.getValueOffset(),
-                  oldKv.getValueLength());
-              System.arraycopy(kv.getValueArray(), kv.getValueOffset(),
-                  newKV.getValueArray(),
-                  newKV.getValueOffset() + oldKv.getValueLength(),
-                  kv.getValueLength());
+              System.arraycopy(oldCell.getValueArray(), oldCell.getValueOffset(),
+                  newCell.getValueArray(), newCell.getValueOffset(),
+                  oldCell.getValueLength());
+              System.arraycopy(cell.getValueArray(), cell.getValueOffset(),
+                  newCell.getValueArray(),
+                  newCell.getValueOffset() + oldCell.getValueLength(),
+                  cell.getValueLength());
               // copy in the tags
-              System.arraycopy(oldKv.getTagsArray(), oldKv.getTagsOffset(), newKV.getTagsArray(),
-                  newKV.getTagsOffset(), oldKv.getTagsLength());
-              System.arraycopy(kv.getTagsArray(), kv.getTagsOffset(), newKV.getTagsArray(),
-                  newKV.getTagsOffset() + oldKv.getTagsLength(), kv.getTagsLength());
+              System.arraycopy(oldCell.getTagsArray(), oldCell.getTagsOffset(),
+                  newCell.getTagsArray(), newCell.getTagsOffset(), oldCell.getTagsLength());
+              System.arraycopy(cell.getTagsArray(), cell.getTagsOffset(), newCell.getTagsArray(),
+                  newCell.getTagsOffset() + oldCell.getTagsLength(), cell.getTagsLength());
               // copy in row, family, and qualifier
-              System.arraycopy(kv.getRowArray(), kv.getRowOffset(),
-                  newKV.getRowArray(), newKV.getRowOffset(), kv.getRowLength());
-              System.arraycopy(kv.getFamilyArray(), kv.getFamilyOffset(),
-                  newKV.getFamilyArray(), newKV.getFamilyOffset(),
-                  kv.getFamilyLength());
-              System.arraycopy(kv.getQualifierArray(), kv.getQualifierOffset(),
-                  newKV.getQualifierArray(), newKV.getQualifierOffset(),
-                  kv.getQualifierLength());
+              System.arraycopy(cell.getRowArray(), cell.getRowOffset(),
+                  newCell.getRowArray(), newCell.getRowOffset(), cell.getRowLength());
+              System.arraycopy(cell.getFamilyArray(), cell.getFamilyOffset(),
+                  newCell.getFamilyArray(), newCell.getFamilyOffset(),
+                  cell.getFamilyLength());
+              System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(),
+                  newCell.getQualifierArray(), newCell.getQualifierOffset(),
+                  cell.getQualifierLength());
               idx++;
             } else {
-              newKV = kv;
               // Append's KeyValue.Type==Put and ts==HConstants.LATEST_TIMESTAMP,
               // so only need to update the timestamp to 'now'
+              // TODO get rid of KeyValueUtil.ensureKeyValue
+              KeyValue newKV = KeyValueUtil.ensureKeyValue(cell);
               newKV.updateLatestStamp(Bytes.toBytes(now));
+              newCell = newKV;
             }
-            newKV.setSequenceId(mvccNum);
+            CellUtil.setSequenceId(newCell, mvccNum);
             // Give coprocessors a chance to update the new cell
             if (coprocessorHost != null) {
-              newKV = KeyValueUtil.ensureKeyValue(coprocessorHost.postMutationBeforeWAL(
-                  RegionObserver.MutationType.APPEND, append, oldKv, newKV));
+              newCell = coprocessorHost.postMutationBeforeWAL(RegionObserver.MutationType.APPEND,
+                  append, oldCell, newCell);
             }
-            kvs.add(newKV);
+            kvs.add(newCell);

             // Append update to WAL
             if (writeToWAL) {
               if (walEdits == null) {
                 walEdits = new WALEdit();
               }
-              walEdits.add(newKV);
+              walEdits.add(newCell);
             }
           }
@@ -5354,8 +5353,7 @@ public class HRegion implements HeapSize { // , Writable{
             } else {
               // otherwise keep older versions around
               for (Cell cell: entry.getValue()) {
-                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-                Pair<Long, Cell> ret = store.add(kv);
+                Pair<Long, Cell> ret = store.add(cell);
                 size += ret.getFirst();
                 memstoreCells.add(ret.getSecond());
                 doRollBackMemstore = true;
@@ -5520,7 +5518,7 @@ public class HRegion implements HeapSize { // , Writable{
                 byte[] val = Bytes.toBytes(amount);
                 int oldCellTagsLen = (c == null) ? 0 : c.getTagsLength();
                 int incCellTagsLen = kv.getTagsLength();
-                KeyValue newKV = new KeyValue(row.length, family.getKey().length, q.length, now,
+                Cell newKV = new KeyValue(row.length, family.getKey().length, q.length, now,
                     KeyValue.Type.Put, val.length, oldCellTagsLen + incCellTagsLen);
                 System.arraycopy(row, 0, newKV.getRowArray(), newKV.getRowOffset(), row.length);
                 System.arraycopy(family.getKey(), 0, newKV.getFamilyArray(), newKV.getFamilyOffset(),
@@ -5537,11 +5535,11 @@ public class HRegion implements HeapSize { // , Writable{
                   System.arraycopy(kv.getTagsArray(), kv.getTagsOffset(), newKV.getTagsArray(),
                       newKV.getTagsOffset() + oldCellTagsLen, incCellTagsLen);
                 }
-                newKV.setSequenceId(mvccNum);
+                CellUtil.setSequenceId(newKV, mvccNum);
                 // Give coprocessors a chance to update the new cell
                 if (coprocessorHost != null) {
-                  newKV = KeyValueUtil.ensureKeyValue(coprocessorHost.postMutationBeforeWAL(
-                      RegionObserver.MutationType.INCREMENT, increment, c, newKV));
+                  newKV = coprocessorHost.postMutationBeforeWAL(
+                      RegionObserver.MutationType.INCREMENT, increment, c, newKV);
                 }
                 allKVs.add(newKV);
@@ -5575,8 +5573,7 @@ public class HRegion implements HeapSize { // , Writable{
             } else {
              // otherwise keep older versions around
               for (Cell cell : entry.getValue()) {
-                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-                Pair<Long, Cell> ret = store.add(kv);
+                Pair<Long, Cell> ret = store.add(cell);
                 size += ret.getFirst();
                 memstoreCells.add(ret.getSecond());
                 doRollBackMemstore = true;
@@ -6064,8 +6061,8 @@ public class HRegion implements HeapSize { // , Writable{
     long mutationSize = 0;
     for (List<Cell> cells: familyMap.values()) {
       for (Cell cell : cells) {
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-        mutationSize += kv.getKeyLength() + kv.getValueLength();
+        // TODO we need include tags length also here.
+        mutationSize += KeyValueUtil.keyLength(cell) + cell.getValueLength();
       }
     }

File: StoreFlusher.java

@@ -28,8 +28,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
@@ -120,8 +118,7 @@ abstract class StoreFlusher {
           // If we know that this KV is going to be included always, then let us
           // set its memstoreTS to 0. This will help us save space when writing to
           // disk.
-          KeyValue kv = KeyValueUtil.ensureKeyValue(c);
-          sink.append(kv);
+          sink.append(c);
         }
         kvs.clear();
       }

File: ReplicationSink.java

@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HConnection;
@@ -149,9 +148,9 @@ public class ReplicationSink {
             addToHashMultiMap(rowMap, table, clusterIds, m);
           }
           if (CellUtil.isDelete(cell)) {
-            ((Delete)m).addDeleteMarker(KeyValueUtil.ensureKeyValue(cell));
+            ((Delete)m).addDeleteMarker(cell);
           } else {
-            ((Put)m).add(KeyValueUtil.ensureKeyValue(cell));
+            ((Put)m).add(cell);
           }
           previousCell = cell;
         }

File: RemoteHTable.java

@@ -35,11 +35,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -198,10 +198,9 @@ public class RemoteHTable implements HTableInterface {
     long ts = put.getTimeStamp();
     for (List<Cell> cells: put.getFamilyCellMap().values()) {
       for (Cell cell: cells) {
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-        row.addCell(new CellModel(kv.getFamily(), kv.getQualifier(),
-          ts != HConstants.LATEST_TIMESTAMP ? ts : kv.getTimestamp(),
-          kv.getValue()));
+        row.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
+          ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(),
+          CellUtil.cloneValue(cell)));
       }
     }
     CellSetModel model = new CellSetModel();
@@ -442,8 +441,7 @@ public class RemoteHTable implements HTableInterface {
     for (Map.Entry<byte[], List<Cell>> e: map.entrySet()) {
       RowModel row = new RowModel(e.getKey());
       for (Cell cell: e.getValue()) {
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-        row.addCell(new CellModel(kv));
+        row.addCell(new CellModel(cell));
       }
       model.addRow(row);
     }

File: CellModel.java

@@ -30,6 +30,7 @@ import javax.xml.bind.annotation.XmlValue;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
@@ -99,10 +100,11 @@ public class CellModel implements ProtobufMessageHandler, Serializable {
   /**
    * Constructor from KeyValue
-   * @param kv
+   * @param cell
    */
-  public CellModel(KeyValue kv) {
-    this(kv.getFamily(), kv.getQualifier(), kv.getTimestamp(), kv.getValue());
+  public CellModel(org.apache.hadoop.hbase.Cell cell) {
+    this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil
+        .cloneValue(cell));
   }

   /**

File: AccessController.java

@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -68,7 +67,6 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
 import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
@@ -232,11 +230,10 @@ public class AccessController extends BaseMasterAndRegionObserver
       for (Map.Entry<byte[], List<Cell>> f : familyMap.entrySet()) {
         List<Cell> cells = f.getValue();
         for (Cell cell: cells) {
-          KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-          if (Bytes.equals(kv.getFamilyArray(), kv.getFamilyOffset(),
-              kv.getFamilyLength(), AccessControlLists.ACL_LIST_FAMILY, 0,
+          if (Bytes.equals(cell.getFamilyArray(), cell.getFamilyOffset(),
+              cell.getFamilyLength(), AccessControlLists.ACL_LIST_FAMILY, 0,
               AccessControlLists.ACL_LIST_FAMILY.length)) {
-            entries.add(kv.getRow());
+            entries.add(CellUtil.cloneRow(cell));
           }
         }
       }
@@ -771,15 +768,12 @@ public class AccessController extends BaseMasterAndRegionObserver
             tags.add(tagIterator.next());
           }
         }
-        // Ensure KeyValue so we can do a scatter gather copy. This is only a win if the
-        // incoming cell type is actually KeyValue.
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
         newCells.add(
-          new KeyValue(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
-            kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(),
-            kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(),
-            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getTypeByte()),
-            kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(),
+          new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
+            cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
+            cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
+            cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
+            cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(),
             tags));
       }
       // This is supposed to be safe, won't CME
@@ -1769,15 +1763,14 @@ public class AccessController extends BaseMasterAndRegionObserver
     // We need to create another KV, unfortunately, because the current new KV
     // has no space for tags
-    KeyValue newKv = KeyValueUtil.ensureKeyValue(newCell);
-    KeyValue rewriteKv = new KeyValue(newKv.getRowArray(), newKv.getRowOffset(), newKv.getRowLength(),
-        newKv.getFamilyArray(), newKv.getFamilyOffset(), newKv.getFamilyLength(),
-        newKv.getQualifierArray(), newKv.getQualifierOffset(), newKv.getQualifierLength(),
-        newKv.getTimestamp(), KeyValue.Type.codeToType(newKv.getTypeByte()),
-        newKv.getValueArray(), newKv.getValueOffset(), newKv.getValueLength(),
-        tags);
+    KeyValue rewriteKv = new KeyValue(newCell.getRowArray(), newCell.getRowOffset(),
+        newCell.getRowLength(), newCell.getFamilyArray(), newCell.getFamilyOffset(),
+        newCell.getFamilyLength(), newCell.getQualifierArray(), newCell.getQualifierOffset(),
+        newCell.getQualifierLength(), newCell.getTimestamp(), KeyValue.Type.codeToType(newCell
+            .getTypeByte()), newCell.getValueArray(), newCell.getValueOffset(),
+        newCell.getValueLength(), tags);
     // Preserve mvcc data
-    rewriteKv.setSequenceId(newKv.getMvccVersion());
+    rewriteKv.setSequenceId(newCell.getSequenceId());
     return rewriteKv;
   }

File: ThriftUtilities.java

@@ -30,8 +30,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -333,10 +331,9 @@ public class ThriftUtilities {
         in.getFamilyCellMap().entrySet()) {
       TColumn column = new TColumn(ByteBuffer.wrap(familyEntry.getKey()));
       for (org.apache.hadoop.hbase.Cell cell: familyEntry.getValue()) {
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-        byte[] family = kv.getFamily();
-        byte[] qualifier = kv.getQualifier();
-        long timestamp = kv.getTimestamp();
+        byte[] family = CellUtil.cloneFamily(cell);
+        byte[] qualifier = CellUtil.cloneQualifier(cell);
+        long timestamp = cell.getTimestamp();
         if (family != null) {
           column.setFamily(family);
         }
@@ -344,7 +341,7 @@ public class ThriftUtilities {
         column.setQualifier(qualifier);
       }
       if (timestamp != HConstants.LATEST_TIMESTAMP) {
-        column.setTimestamp(kv.getTimestamp());
+        column.setTimestamp(timestamp);
       }
     }
     columns.add(column);