HBASE-1907 Version all client writables

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@825736 13f79535-47bb-0310-9956-ffa450edef68
Author: Andrew Kyle Purtell
Date: 2009-10-16 01:53:51 +00:00
Parent: f2520488de
Commit: 3a591a989c
6 changed files with 44 additions and 0 deletions
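
The change applies one pattern to each of the client writables touched below (Delete, Get, Put, Result, Scan): write() now emits a single version byte ahead of the existing fields, and readFields() reads that byte back and rejects anything newer than the version the class itself knows. A minimal self-contained sketch of the pattern, with an invented class name and field (not part of this commit):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

// Illustrative sketch of the versioning pattern; names are hypothetical.
public class ExampleVersionedWritable implements Writable {
  // Bump this constant whenever the serialized layout changes.
  private static final byte EXAMPLE_VERSION = (byte)1;
  private long field = 0L;

  public void write(final DataOutput out) throws IOException {
    out.writeByte(EXAMPLE_VERSION);   // version byte goes first on the wire
    out.writeLong(this.field);
  }

  public void readFields(final DataInput in) throws IOException {
    int version = in.readByte();
    if (version > EXAMPLE_VERSION) {
      // A newer writer than this reader understands: fail fast.
      throw new IOException("version not supported");
    }
    this.field = in.readLong();
  }
}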

CHANGES.txt

@@ -120,6 +120,7 @@ Release 0.21.0 - Unreleased
HBASE-1885 Simplify use of IndexedTable outside Java API
(Kevin Patterson via Stack)
HBASE-1903 Enable DEBUG by default
HBASE-1907 Version all client writables
OPTIMIZATIONS

Delete.java

@@ -66,6 +66,8 @@ import org.apache.hadoop.hbase.util.Bytes;
* timestamp. The constructor timestamp is not referenced.
*/
public class Delete implements Writable, Row, Comparable<Row> {
private static final byte DELETE_VERSION = (byte)1;
private byte [] row = null;
// This ts is only used when doing a deleteRow. Anything less,
private long ts;
@@ -304,6 +306,10 @@ public class Delete implements Writable, Row, Comparable<Row> {
//Writable
public void readFields(final DataInput in) throws IOException {
int version = in.readByte();
if (version > DELETE_VERSION) {
throw new IOException("version not supported");
}
this.row = Bytes.readByteArray(in);
this.ts = in.readLong();
this.lockId = in.readLong();
@@ -323,6 +329,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
}
public void write(final DataOutput out) throws IOException {
out.writeByte(DELETE_VERSION);
Bytes.writeByteArray(out, this.row);
out.writeLong(this.ts);
out.writeLong(this.lockId);

Get.java

@@ -60,6 +60,8 @@ import org.apache.hadoop.io.Writable;
* To add a filter, execute {@link #setFilter(Filter) setFilter}.
*/
public class Get implements Writable {
private static final byte GET_VERSION = (byte)1;
private byte [] row = null;
private long lockId = -1L;
private int maxVersions = 1;
@@ -319,6 +321,10 @@ public class Get implements Writable {
//Writable
public void readFields(final DataInput in)
throws IOException {
int version = in.readByte();
if (version > GET_VERSION) {
throw new IOException("unsupported version");
}
this.row = Bytes.readByteArray(in);
this.lockId = in.readLong();
this.maxVersions = in.readInt();
@@ -349,6 +355,7 @@ public class Get implements Writable {
public void write(final DataOutput out)
throws IOException {
out.writeByte(GET_VERSION);
Bytes.writeByteArray(out, this.row);
out.writeLong(this.lockId);
out.writeInt(this.maxVersions);

Put.java

@@ -46,6 +46,8 @@ import org.apache.hadoop.hbase.util.ClassSize;
* {@link #add(byte[], byte[], long, byte[]) add} if setting the timestamp.
*/
public class Put implements HeapSize, Writable, Row, Comparable<Row> {
private static final byte PUT_VERSION = (byte)1;
private byte [] row = null;
private long timestamp = HConstants.LATEST_TIMESTAMP;
private long lockId = -1L;
@@ -316,6 +318,10 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
//Writable
public void readFields(final DataInput in)
throws IOException {
int version = in.readByte();
if (version > PUT_VERSION) {
throw new IOException("version not supported");
}
this.row = Bytes.readByteArray(in);
this.timestamp = in.readLong();
this.lockId = in.readLong();
@@ -341,6 +347,7 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
public void write(final DataOutput out)
throws IOException {
out.writeByte(PUT_VERSION);
Bytes.writeByteArray(out, this.row);
out.writeLong(this.timestamp);
out.writeLong(this.lockId);

Result.java

@@ -63,6 +63,8 @@ import org.apache.hadoop.io.Writable;
* {@link KeyValue#getTimestamp()}, and {@link KeyValue#getValue()}.
*/
public class Result implements Writable {
private static final byte RESULT_VERSION = (byte)1;
private KeyValue [] kvs = null;
private NavigableMap<byte[],
NavigableMap<byte[], NavigableMap<Long, byte[]>>> familyMap = null;
@@ -446,6 +448,11 @@ public class Result implements Writable {
public static void writeArray(final DataOutput out, Result [] results)
throws IOException {
// Write version when writing array form.
// This assumes that results are sent to the client as Result[], so we
// have an opportunity to handle version differences without affecting
// efficiency.
out.writeByte(RESULT_VERSION);
if(results == null || results.length == 0) {
out.writeInt(0);
return;
@@ -477,6 +484,14 @@ public class Result implements Writable {
public static Result [] readArray(final DataInput in)
throws IOException {
// Read version for array form.
// This assumes that results are sent to the client as Result[], so we
// have an opportunity to handle version differences without affecting
// efficiency.
int version = in.readByte();
if (version > RESULT_VERSION) {
throw new IOException("version not supported");
}
int numResults = in.readInt();
if(numResults == 0) {
return new Result[0];

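Result is the one class versioned at the array level rather than per instance: as the comments in writeArray()/readArray() note, results are shipped to the client as Result[], so a single version byte per batch is enough to evolve the format later without adding per-Result overhead. A self-contained sketch of that batch-level idea, using invented names and plain java.io rather than the HBase types:

import java.io.*;

// Sketch: one version byte covers a whole batch, so a reader can accept
// older layouts and reject newer ones. Names here are illustrative.
public class BatchVersionDemo {
  static final byte BATCH_VERSION = 1;

  static void writeBatch(DataOutput out, long[] values) throws IOException {
    out.writeByte(BATCH_VERSION);        // single version byte for the array
    out.writeInt(values.length);
    for (long v : values) out.writeLong(v);
  }

  static long[] readBatch(DataInput in) throws IOException {
    int version = in.readByte();
    if (version > BATCH_VERSION) {       // newer writer than this reader
      throw new IOException("version not supported");
    }
    int n = in.readInt();
    long[] values = new long[n];
    for (int i = 0; i < n; i++) values[i] = in.readLong();
    return values;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    writeBatch(new DataOutputStream(buf), new long[] {1L, 2L, 3L});
    long[] back = readBatch(new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray())));
    System.out.println(back.length);     // 3
  }
}

A hypothetical version 2 of the batch format would bump the constant and have the reader branch on the version byte to decide which fields to expect, so older layouts stay readable while newer ones are still rejected cleanly.
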
Scan.java

@@ -72,6 +72,8 @@ import org.apache.hadoop.io.WritableFactories;
* execute {@link #setCacheBlocks(boolean)}.
*/
public class Scan implements Writable {
private static final byte SCAN_VERSION = (byte)1;
private byte [] startRow = HConstants.EMPTY_START_ROW;
private byte [] stopRow = HConstants.EMPTY_END_ROW;
private int maxVersions = 1;
@@ -435,6 +437,10 @@ public class Scan implements Writable {
//Writable
public void readFields(final DataInput in)
throws IOException {
int version = in.readByte();
if (version > (int)SCAN_VERSION) {
throw new IOException("version not supported");
}
this.startRow = Bytes.readByteArray(in);
this.stopRow = Bytes.readByteArray(in);
this.maxVersions = in.readInt();
@@ -463,6 +469,7 @@ public class Scan implements Writable {
public void write(final DataOutput out)
throws IOException {
out.writeByte(SCAN_VERSION);
Bytes.writeByteArray(out, this.startRow);
Bytes.writeByteArray(out, this.stopRow);
out.writeInt(this.maxVersions);
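
The guard added to every readFields() above (version greater than the class's constant results in an IOException) means a peer that only understands version 1 fails fast when handed a newer layout instead of silently misparsing the rest of the stream. A small illustration of that failure path, again with invented names and plain java.io:

import java.io.*;

// Demonstrates the fail-fast guard: a reader that only knows version 1
// refuses a stream whose leading version byte is higher.
public class VersionGuardDemo {
  static final byte KNOWN_VERSION = 1;

  static long readVersioned(DataInput in) throws IOException {
    int version = in.readByte();
    if (version > KNOWN_VERSION) {
      throw new IOException("version not supported");
    }
    return in.readLong();
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeByte(2);          // pretend a future writer bumped the version
    out.writeLong(42L);
    try {
      readVersioned(new DataInputStream(
          new ByteArrayInputStream(buf.toByteArray())));
    } catch (IOException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}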