diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index f2d777c60c6..c2573853a52 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -332,21 +332,6 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
this.length = length;
}
- /**
- * Creates a KeyValue from the specified byte array, starting at offset,
- * for length <code>length</code>, and a known <code>keyLength</code>.
- * @param bytes byte array
- * @param offset offset to start of the KeyValue
- * @param length length of the KeyValue
- * @param keyLength length of the key portion of the KeyValue
- */
- public KeyValue(final byte [] bytes, final int offset, final int length, final int keyLength) {
- this.bytes = bytes;
- this.offset = offset;
- this.length = length;
- this.keyLength = keyLength;
- }
-
/** Constructors that build a new backing byte array from fields */
/**
@@ -1231,13 +1216,8 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
/**
* @return Length of key portion.
*/
- private int keyLength = 0;
-
public int getKeyLength() {
- if (keyLength == 0) {
- keyLength = Bytes.toInt(this.bytes, this.offset);
- }
- return keyLength;
+ return Bytes.toInt(this.bytes, this.offset);
}
/**
@@ -2856,7 +2836,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
sum += ClassSize.REFERENCE;// pointer to "bytes"
sum += ClassSize.align(ClassSize.ARRAY);// "bytes"
sum += ClassSize.align(length);// number of bytes of data in the "bytes" array
- sum += 3 * Bytes.SIZEOF_INT;// offset, length, keyLength
+ sum += 2 * Bytes.SIZEOF_INT;// offset, length
sum += Bytes.SIZEOF_LONG;// memstoreTS
return ClassSize.align(sum);
}
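
For context on the KeyValue hunks above: removing the cached field means getKeyLength() re-decodes the key length from the serialized cell on every call. Below is a minimal sketch, not part of the patch, assuming the standard KeyValue layout of [4-byte key length][4-byte value length][key][value]; the sketch class and method names are hypothetical, while Bytes and KeyValue are the real HBase classes.

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeyValueLayoutSketch {
      // Key length is the first 4-byte int of the serialized cell -- the same
      // read getKeyLength() now performs instead of returning a cached field.
      static int readKeyLength(byte[] bytes, int offset) {
        return Bytes.toInt(bytes, offset);
      }

      // Value length is the second 4-byte int; the key itself starts right
      // after both length ints, i.e. at offset + KeyValue.ROW_OFFSET.
      static int readValueLength(byte[] bytes, int offset) {
        return Bytes.toInt(bytes, offset + Bytes.SIZEOF_INT);
      }

      static int keyOffset(int offset) {
        return offset + KeyValue.ROW_OFFSET;
      }
    }

Dropping the int field is also what the heapSize() change above accounts for: the fixed per-KeyValue overhead goes from three ints to two.
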
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index b1bdcd6f657..e7b14db1d97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -651,7 +651,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
return null;
KeyValue ret = new KeyValue(blockBuffer.array(), blockBuffer.arrayOffset()
- + blockBuffer.position(), getCellBufSize(), currKeyLen);
+ + blockBuffer.position(), getCellBufSize());
if (this.reader.shouldIncludeMemstoreTS()) {
ret.setMvccVersion(currMemstoreTS);
}
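
Relatedly, a minimal usage sketch, not part of the patch, of the surviving three-argument constructor over an already-serialized cell; the helper class, method, and variable names are hypothetical.

    import org.apache.hadoop.hbase.KeyValue;

    public class HFileReadSketch {
      // The caller no longer passes the key length; getKeyLength() re-reads it
      // from the first four bytes of the cell when it is actually needed.
      static KeyValue wrapCell(byte[] backingArray, int cellOffset, int cellLength) {
        KeyValue kv = new KeyValue(backingArray, cellOffset, cellLength);
        int keyLen = kv.getKeyLength();  // decoded from the serialized bytes
        assert keyLen > 0;
        return kv;
      }
    }
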
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
index 153d7c54766..dae9c70cee6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
@@ -247,11 +247,12 @@ public class ScanQueryMatcher {
byte [] bytes = kv.getBuffer();
int offset = kv.getOffset();
- int initialOffset = offset;
int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT);
offset += KeyValue.ROW_OFFSET;
+ int initialOffset = offset;
+
short rowLength = Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT);
offset += Bytes.SIZEOF_SHORT;
@@ -282,10 +283,10 @@ public class ScanQueryMatcher {
byte familyLength = bytes [offset];
offset += familyLength + 1;
- int qualLength = keyLength + KeyValue.ROW_OFFSET -
+ int qualLength = keyLength -
(offset - initialOffset) - KeyValue.TIMESTAMP_TYPE_SIZE;
- long timestamp = kv.getTimestamp();
+ long timestamp = Bytes.toLong(bytes, initialOffset + keyLength - KeyValue.TIMESTAMP_TYPE_SIZE);
// check for early out based on timestamp alone
if (columns.isDone(timestamp)) {
return columns.getNextRowOrNextColumn(bytes, offset, qualLength);
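
For context on this hunk and the one below: with initialOffset now pointing at the start of the key (just past the two length ints), the trailing timestamp and type byte can be read straight from the backing array rather than through the KeyValue accessors. A minimal sketch, not part of the patch, assuming the standard key layout of [2-byte row length][row][1-byte family length][family][qualifier][8-byte timestamp][1-byte type]; the sketch class name is hypothetical.

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeyTailSketch {
      // The timestamp occupies the 8 bytes immediately before the trailing type
      // byte, i.e. TIMESTAMP_TYPE_SIZE (8 + 1) bytes from the end of the key.
      static long readTimestamp(byte[] bytes, int keyOffset, int keyLength) {
        return Bytes.toLong(bytes, keyOffset + keyLength - KeyValue.TIMESTAMP_TYPE_SIZE);
      }

      // The type byte is the very last byte of the key.
      static byte readTypeByte(byte[] bytes, int keyOffset, int keyLength) {
        return bytes[keyOffset + keyLength - 1];
      }
    }
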
@@ -304,7 +305,7 @@ public class ScanQueryMatcher {
* 7. Delete marker need to be version counted together with puts
* they affect
*/
- byte type = kv.getTypeByte();
+ byte type = bytes[initialOffset + keyLength - 1];
if (kv.isDelete()) {
if (!keepDeletedCells) {
// first ignore delete markers if the scanner can do so, and the