From 958681238089004cc52a9719d3c99fe17e22cf90 Mon Sep 17 00:00:00 2001
From: Michael Stack <stack@apache.org>
Date: Fri, 28 Oct 2011 23:40:32 +0000
Subject: [PATCH] HBASE-4436 Remove trivial 0.90 deprecated code from 0.92 and trunk.

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1190675 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES.txt                                  |  2 +
 .../hadoop/hbase/HColumnDescriptor.java      | 16 -----
 .../org/apache/hadoop/hbase/KeyValue.java    | 14 -----
 .../apache/hadoop/hbase/client/Delete.java   | 28 ---------
 .../org/apache/hadoop/hbase/client/Get.java  | 37 -----------
 .../hadoop/hbase/client/HBaseAdmin.java      | 63 -------------------
 .../hadoop/hbase/client/HConnection.java     | 16 +----
 .../hbase/client/HConnectionManager.java     | 27 +-------
 .../apache/hadoop/hbase/client/HTable.java   | 18 +++++-
 .../org/apache/hadoop/hbase/client/Put.java  | 15 -----
 .../apache/hadoop/hbase/client/Result.java   | 62 +++---------
 .../hadoop/hbase/io/HbaseObjectWritable.java |  5 --
 .../hadoop/hbase/ipc/HRegionInterface.java   | 11 ----
 .../hbase/regionserver/HRegionServer.java    | 21 -------
 .../hadoop/hbase/thrift/ThriftServer.java    |  9 ++-
 .../hadoop/hbase/thrift/ThriftUtilities.java |  2 +-
 .../hadoop/hbase/TestSerialization.java      | 12 ++--
 .../hadoop/hbase/TimestampTestBase.java      |  6 +-
 .../hbase/client/TestFromClientSide.java     | 38 +++++------
 .../hbase/regionserver/TestHRegion.java      |  4 +-
 .../hadoop/hbase/util/TestMergeTool.java     |  4 +-
 21 files changed, 67 insertions(+), 343 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index c34db6cbbe0..d9dd67abb1a 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -721,6 +721,8 @@ Release 0.92.0 - Unreleased
    HBASE-4656  Note how dfs.support.append has to be enabled in 0.20.205.0
                clusters
    HBASE-4699  Cleanup the UIs
+   HBASE-4436  Remove trivial 0.90 deprecated code from 0.92 and trunk.
+               (Jonathan Hsieh)
 
   NEW FEATURES
    HBASE-2001  Coprocessors: Colocate user code with regions (Mingjie Lai via
diff --git a/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 31f5d8ab635..2ddb84c05b8 100644
--- a/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -55,22 +55,6 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   // Version 8 -- reintroduction of bloom filters, changed from boolean to enum
   private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)8;
 
-  /**
-   * The type of compression.
-   * @see org.apache.hadoop.io.SequenceFile.Writer
-   * @deprecated Compression now means which compression library
-   * rather than 'what' to compress.
-   */
-  @Deprecated
-  public static enum CompressionType {
-    /** Do not compress records. */
-    NONE,
-    /** Compress values only, each separately. */
-    RECORD,
-    /** Compress sequences of records together in blocks. */
-    BLOCK
-  }
-
   public static final String COMPRESSION = "COMPRESSION";
   public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
   public static final String BLOCKCACHE = "BLOCKCACHE";
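
The removed enum described what to compress; per the deprecation note, compression now means which compression library to use, recorded per column family under the COMPRESSION key. A minimal sketch of the current idiom, not part of the patch (assumes org.apache.hadoop.hbase.HColumnDescriptor and org.apache.hadoop.hbase.io.hfile.Compression are imported; the "cf" family name and the GZ codec choice are hypothetical):

    // Illustrative only: pick the compression library via Compression.Algorithm,
    // which is what makes the removed CompressionType enum redundant.
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setCompressionType(Compression.Algorithm.GZ);
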
diff --git a/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index af52b2c9942..e68e486a11f 100644
--- a/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1736,20 +1736,6 @@ public class KeyValue implements Writable, HeapSize {
     return new KeyValue(row, null, null, ts, Type.Maximum);
   }
 
-  /**
-   * @param row - row key (arbitrary byte array)
-   * @param c column - {@link #parseColumn(byte[])} is called to split
-   * the column.
-   * @param ts - timestamp
-   * @return First possible key on passed row, column and timestamp
-   * @deprecated
-   */
-  public static KeyValue createFirstOnRow(final byte [] row, final byte [] c,
-      final long ts) {
-    byte [][] split = parseColumn(c);
-    return new KeyValue(row, split[0], split[1], ts, Type.Maximum);
-  }
-
   /**
    * Create a KeyValue for the specified row, family and qualifier that would be
    * smaller than all other possible KeyValues that have the same row,family,qualifier.
diff --git a/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 9aaeb90e3bd..e524d8e0dca 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -280,32 +280,4 @@ public class Delete extends Mutation
     }
     writeAttributes(out);
   }
-
-  /**
-   * Delete all versions of the specified column, given in
-   * family:qualifier notation, and with a timestamp less than
-   * or equal to the specified timestamp.
-   * @param column colon-delimited family and qualifier
-   * @param timestamp maximum version timestamp
-   * @deprecated use {@link #deleteColumns(byte[], byte[], long)} instead
-   * @return this for invocation chaining
-   */
-  public Delete deleteColumns(byte [] column, long timestamp) {
-    byte [][] parts = KeyValue.parseColumn(column);
-    this.deleteColumns(parts[0], parts[1], timestamp);
-    return this;
-  }
-
-  /**
-   * Delete the latest version of the specified column, given in
-   * family:qualifier notation.
-   * @param column colon-delimited family and qualifier
-   * @deprecated use {@link #deleteColumn(byte[], byte[])} instead
-   * @return this for invocation chaining
-   */
-  public Delete deleteColumn(byte [] column) {
-    byte [][] parts = KeyValue.parseColumn(column);
-    this.deleteColumn(parts[0], parts[1], HConstants.LATEST_TIMESTAMP);
-    return this;
-  }
 }
diff --git a/src/main/java/org/apache/hadoop/hbase/client/Get.java b/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 3f2d33c8abe..93c9e8961e5 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -457,41 +457,4 @@ public class Get extends OperationWithAttributes
       throw new RuntimeException("Can't find class " + className);
     }
   }
-
-  /**
-   * Adds an array of columns specified in the old format, family:qualifier.
-   * <p>
-   * Overrides previous calls to addFamily for any families in the input.
-   * @param columns array of columns, formatted as <pre>family:qualifier</pre>
-   * @deprecated issue multiple {@link #addColumn(byte[], byte[])} instead
-   * @return this for invocation chaining
-   */
-  @SuppressWarnings({"deprecation"})
-  public Get addColumns(byte [][] columns) {
-    if (columns == null) return this;
-    for (byte[] column : columns) {
-      try {
-        addColumn(column);
-      } catch (Exception ignored) {
-      }
-    }
-    return this;
-  }
-
-  /**
-   *
-   * @param column Old format column.
-   * @return This.
-   * @deprecated use {@link #addColumn(byte[], byte[])} instead
-   */
-  public Get addColumn(final byte [] column) {
-    if (column == null) return this;
-    byte [][] split = KeyValue.parseColumn(column);
-    if (split.length > 1 && split[1] != null && split[1].length > 0) {
-      addColumn(split[0], split[1]);
-    } else {
-      addFamily(split[0]);
-    }
-    return this;
-  }
 }
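
Callers of the removed colon-delimited helpers split the column themselves and use the two-argument forms that remain. A hedged migration sketch (the row, family, and qualifier values are hypothetical; assumes org.apache.hadoop.hbase.util.Bytes is imported):

    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("q");

    Get get = new Get(Bytes.toBytes("row1"));
    get.addColumn(family, qualifier);      // was: get.addColumn(Bytes.toBytes("cf:q"))

    Delete del = new Delete(Bytes.toBytes("row1"));
    // was: del.deleteColumns(Bytes.toBytes("cf:q"), ts)
    del.deleteColumns(family, qualifier, System.currentTimeMillis());
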
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index e10d7e91260..0480d1eccf0 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.NotServingRegionException;
@@ -951,22 +950,6 @@ public class HBaseAdmin implements Abortable, Closeable {
     }
   }
 
-  /**
-   * Modify an existing column family on a table.
-   * Asynchronous operation.
-   *
-   * @param tableName name of table
-   * @param columnName name of column to be modified
-   * @param descriptor new column descriptor to use
-   * @throws IOException if a remote or network exception occurs
-   * @deprecated The columnName is redundant. Use {@link #modifyColumn(String, HColumnDescriptor)}
-   */
-  public void modifyColumn(final String tableName, final String columnName,
-      HColumnDescriptor descriptor)
-  throws IOException {
-    modifyColumn(tableName, descriptor);
-  }
-
   /**
    * Modify an existing column family on a table.
    * Asynchronous operation.
@@ -980,22 +963,6 @@
     modifyColumn(Bytes.toBytes(tableName), descriptor);
   }
 
-  /**
-   * Modify an existing column family on a table.
-   * Asynchronous operation.
-   *
-   * @param tableName name of table
-   * @param columnName name of column to be modified
-   * @param descriptor new column descriptor to use
-   * @throws IOException if a remote or network exception occurs
-   * @deprecated The columnName is redundant. Use {@link #modifyColumn(byte[], HColumnDescriptor)}
-   */
-  public void modifyColumn(final byte [] tableName, final byte [] columnName,
-      HColumnDescriptor descriptor)
-  throws IOException {
-    modifyColumn(tableName, descriptor);
-  }
-
   /**
    * Modify an existing column family on a table.
    * Asynchronous operation.
@@ -1308,24 +1275,6 @@
     getMaster().move(encodedRegionName, destServerName);
   }
 
-  /**
-   * Tries to assign a region. Region could be reassigned to the same server.
-   *
-   * @param regionName
-   *          Region name to assign.
-   * @param force
-   *          True to force assign.
-   * @throws MasterNotRunningException
-   * @throws ZooKeeperConnectionException
-   * @throws IOException
-   * @deprecated The force is unused. Use {@link #assign(byte[])}
-   */
-  public void assign(final byte[] regionName, final boolean force)
-      throws MasterNotRunningException, ZooKeeperConnectionException,
-      IOException {
-    getMaster().assign(regionName, force);
-  }
-
   /**
    * @param regionName
    *          Region name to assign.
@@ -1524,18 +1473,6 @@
     }
   }
 
-  /**
-   * Stop the designated regionserver.
-   * @throws IOException if a remote or network exception occurs
-   * @deprecated Use {@link #stopRegionServer(String)}
-   */
-  public synchronized void stopRegionServer(final HServerAddress hsa)
-  throws IOException {
-    HRegionInterface rs =
-      this.connection.getHRegionConnection(hsa);
-    rs.stop("Called by admin client " + this.connection.toString());
-  }
-
   /**
    * Stop the designated regionserver
    * @param hostnamePort Hostname and port delimited by a : as in
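
The @deprecated tags above name the surviving replacements; sketched together (admin, hcd, and regionName are assumed to be in scope, and the table name and host:port values are hypothetical):

    admin.modifyColumn("mytable", hcd);              // columnName argument dropped
    admin.assign(regionName);                        // unused force flag dropped
    admin.stopRegionServer("rs1.example.com:60020"); // host:port string, not HServerAddress
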
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index 66516d838e3..fadbb8dc81c 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -283,7 +283,7 @@
    * @throws IOException if there are problems talking to META. Per-item
    * exceptions are stored in the results array.
    */
-  public void processBatch(List<Row> actions, final byte[] tableName,
+  public void processBatch(List<? extends Row> actions, final byte[] tableName,
       ExecutorService pool, Object[] results)
       throws IOException, InterruptedException;
 
@@ -327,20 +327,6 @@
      final Batch.Call<T,R> call, final Batch.Callback<R> callback)
      throws IOException, Throwable;
 
-  /**
-   * Process a batch of Puts.
-   *
-   * @param list The collection of actions. The list is mutated: all successful Puts
-   * are removed from the list.
-   * @param tableName Name of the hbase table
-   * @param pool Thread pool for parallel execution
-   * @throws IOException
-   * @deprecated Use HConnectionManager::processBatch instead.
-   */
-  public void processBatchOfPuts(List<Put> list,
-      final byte[] tableName, ExecutorService pool)
-      throws IOException;
-
   /**
    * Enable or disable region cache prefetch for the table. It will be
    * applied for the given table's all HTable instances within this
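
The one signature change in this file widens processBatch to List<? extends Row>, so a List<Put> or List<Delete> passes without an unchecked cast. A sketch, assuming connection, tableName, and pool are in scope and java.util.ArrayList is imported:

    List<Put> puts = new ArrayList<Put>();
    puts.add(new Put(Bytes.toBytes("row1")));  // hypothetical row key
    Object[] results = new Object[puts.size()];
    // Compiles against List<? extends Row>; the old List<Row> form needed a cast.
    connection.processBatch(puts, tableName, pool, results);
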
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 06a231255b2..0e7fd34f091 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -1301,7 +1301,7 @@
       };
     }
 
-    public void processBatch(List<Row> list,
+    public void processBatch(List<? extends Row> list,
        final byte[] tableName,
        ExecutorService pool,
        Object[] results) throws IOException, InterruptedException {
@@ -1549,31 +1549,6 @@
       }
     }
 
-    /**
-     * @deprecated Use HConnectionManager::processBatch instead.
-     */
-    public void processBatchOfPuts(List<Put> list,
-        final byte[] tableName,
-        ExecutorService pool) throws IOException {
-      Object[] results = new Object[list.size()];
-      try {
-        processBatch((List) list, tableName, pool, results);
-      } catch (InterruptedException e) {
-        throw new IOException(e);
-      } finally {
-
-        // mutate list so that it is empty for complete success, or contains only failed records
-        // results are returned in the same order as the requests in list
-        // walk the list backwards, so we can remove from list without impacting the indexes of earlier members
-        for (int i = results.length - 1; i>=0; i--) {
-          if (results[i] instanceof Result) {
-            // successful Puts are removed from the list here.
-            list.remove(i);
-          }
-        }
-      }
-    }
-
     private Throwable translateException(Throwable t) throws IOException {
       if (t instanceof UndeclaredThrowableException) {
         t = t.getCause();
diff --git a/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index fadf217de62..eafb4fb3102 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -873,7 +873,23 @@ public class HTable implements HTableInterface, Closeable {
   @Override
   public void flushCommits() throws IOException {
     try {
-      this.connection.processBatchOfPuts(writeBuffer, tableName, pool);
+      Object[] results = new Object[writeBuffer.size()];
+      try {
+        this.connection.processBatch(writeBuffer, tableName, pool, results);
+      } catch (InterruptedException e) {
+        throw new IOException(e);
+      } finally {
+        // Mutate the list so that it is empty for complete success, or
+        // contains only failed records. Results are returned in the same
+        // order as the requests in the list. Walk the list backwards, so we
+        // can remove items without shifting the indexes of earlier members.
+        for (int i = results.length - 1; i>=0; i--) {
+          if (results[i] instanceof Result) {
+            // successful Puts are removed from the list here.
+            writeBuffer.remove(i);
+          }
+        }
+      }
     } finally {
       if (clearBufferOnFail) {
         writeBuffer.clear();
diff --git a/src/main/java/org/apache/hadoop/hbase/client/Put.java b/src/main/java/org/apache/hadoop/hbase/client/Put.java
index ac6793a2801..c09b339d598 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -406,19 +406,4 @@ public class Put extends Mutation
     }
     writeAttributes(out);
   }
-
-  /**
-   * Add the specified column and value, with the specified timestamp as
-   * its version to this Put operation.
-   * @param column Old style column name with family and qualifier put together
-   * with a colon.
-   * @param ts version timestamp
-   * @param value column value
-   * @deprecated use {@link #add(byte[], byte[], long, byte[])} instead
-   * @return true
-   */
-  public Put add(byte [] column, long ts, byte [] value) {
-    byte [][] parts = KeyValue.parseColumn(column);
-    return add(parts[0], parts[1], ts, value);
-  }
 }
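
The pruning loop that moved into HTable.flushCommits() can be mirrored with the public batch() API. This sketch shows the same contract under the assumption that table and a List<Put> named puts are in scope, inside a method declared to throw IOException. The removed Put.add(column, ts, value) migrates the same way as Get and Delete: split the column and call add(family, qualifier, ts, value).

    Object[] results = new Object[puts.size()];
    try {
      table.batch(puts, results);
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
    // Walk backwards so removals do not shift indexes not yet visited; a
    // Result in results[i] marks a successful Put, which is dropped.
    for (int i = results.length - 1; i >= 0; i--) {
      if (results[i] instanceof Result) {
        puts.remove(i);
      }
    }
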
diff --git a/src/main/java/org/apache/hadoop/hbase/client/Result.java b/src/main/java/org/apache/hadoop/hbase/client/Result.java
index bfb8ad750ff..7723fd8d12e 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -20,18 +20,6 @@
 
 package org.apache.hadoop.hbase.client;
 
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.SplitKeyValue;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.WritableWithSize;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Writable;
-
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -43,6 +31,13 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.SplitKeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.WritableWithSize;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.Writable;
+
 /**
  * Single row result of a {@link Get} or {@link Scan} query.<p>
  *
@@ -168,19 +163,6 @@
     return isEmpty()? null: Arrays.asList(raw());
   }
 
-  /**
-   * Returns a sorted array of KeyValues in this Result.
-   * <p>
-   * Since HBase 0.20.5 this is equivalent to {@link #raw}. Use
-   * {@link #raw} instead.
-   *
-   * @return sorted array of KeyValues
-   * @deprecated
-   */
-  public KeyValue[] sorted() {
-    return raw(); // side effect of loading this.kvs
-  }
-
   /**
    * Return the KeyValues for the specific column. The KeyValues are sorted in
   * the {@link KeyValue#COMPARATOR} order. That implies the first entry in
@@ -399,32 +381,6 @@
     return returnMap;
   }
 
-  private Map.Entry<Long, byte[]> getKeyValue(byte[] family, byte[] qualifier) {
-    if(this.familyMap == null) {
-      getMap();
-    }
-    if(isEmpty()) {
-      return null;
-    }
-    NavigableMap<byte[], NavigableMap<Long, byte[]>> qualifierMap =
-      familyMap.get(family);
-    if(qualifierMap == null) {
-      return null;
-    }
-    NavigableMap<Long, byte[]> versionMap =
-      getVersionMap(qualifierMap, qualifier);
-    if(versionMap == null) {
-      return null;
-    }
-    return versionMap.firstEntry();
-  }
-
-  private NavigableMap<Long, byte[]> getVersionMap(
-      NavigableMap<byte[], NavigableMap<Long, byte[]>> qualifierMap, byte [] qualifier) {
-    return qualifier != null?
-      qualifierMap.get(qualifier): qualifierMap.get(new byte[0]);
-  }
-
   /**
    * Returns the value of the first column in the Result.
    * @return value of the first column
@@ -675,8 +631,8 @@
       throw new Exception("This row doesn't have the same number of KVs: "
           + res1.toString() + " compared to " + res2.toString());
     }
-    KeyValue[] ourKVs = res1.sorted();
-    KeyValue[] replicatedKVs = res2.sorted();
+    KeyValue[] ourKVs = res1.raw();
+    KeyValue[] replicatedKVs = res2.raw();
     for (int i = 0; i < res1.size(); i++) {
       if (!ourKVs[i].equals(replicatedKVs[i]) &&
           !Bytes.equals(ourKVs[i].getValue(), replicatedKVs[i].getValue())) {
diff --git a/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java b/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
index 03b19c441b3..d8c2f167438 100644
--- a/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
+++ b/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
@@ -51,8 +51,6 @@ import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.MultiPut;
-import org.apache.hadoop.hbase.client.MultiPutResponse;
 import org.apache.hadoop.hbase.client.MultiAction;
 import org.apache.hadoop.hbase.client.Action;
 import org.apache.hadoop.hbase.client.MultiResponse;
@@ -203,9 +201,6 @@ public class HbaseObjectWritable implements Writable, WritableWithSize, Configur
     addToMap(Delete [].class, code++);
 
-    addToMap(MultiPut.class, code++);
-    addToMap(MultiPutResponse.class, code++);
-
     addToMap(HLog.Entry.class, code++);
     addToMap(HLog.Entry[].class, code++);
     addToMap(HLogKey.class, code++);
diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java b/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
index 1f8b629dc63..319388db3dc 100644
--- a/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
+++ b/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.MultiAction;
-import org.apache.hadoop.hbase.client.MultiPut;
-import org.apache.hadoop.hbase.client.MultiPutResponse;
 import org.apache.hadoop.hbase.client.MultiResponse;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -324,15 +322,6 @@ public interface HRegionInterface extends VersionedProtocol, Stoppable, Abortabl
    */
   public MultiResponse multi(MultiAction multi) throws IOException;
 
-  /**
-   * Multi put for putting multiple regions worth of puts at once.
-   *
-   * @param puts the request
-   * @return the reply
-   * @throws IOException e
-   */
-  public MultiPutResponse multiPut(MultiPut puts) throws IOException;
-
   /**
    * Bulk load an HFile into an open region
    */
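
With MultiPut and MultiPutResponse gone, puts travel through the same generic RPC as every other Row operation. A rough sketch of the replacement shape, assuming the 0.92 MultiAction/Action constructors (server, regionName, and put are hypothetical and in scope):

    MultiAction multi = new MultiAction();
    multi.add(regionName, new Action(put, 0)); // 0 = index in the original batch
    MultiResponse response = server.multi(multi);
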
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 5ed781bb2fc..e6a10b8b081 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -88,8 +88,6 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.MultiAction;
-import org.apache.hadoop.hbase.client.MultiPut;
-import org.apache.hadoop.hbase.client.MultiPutResponse;
 import org.apache.hadoop.hbase.client.MultiResponse;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -3116,25 +3114,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
     return response;
   }
 
-  /**
-   * @deprecated Use HRegionServer.multi( MultiAction action) instead
-   */
-  @Override
-  public MultiPutResponse multiPut(MultiPut puts) throws IOException {
-    checkOpen();
-    MultiPutResponse resp = new MultiPutResponse();
-
-    // do each region as its own.
-    for (Map.Entry<byte[], List<Put>> e : puts.puts.entrySet()) {
-      int result = put(e.getKey(), e.getValue());
-      resp.addResult(e.getKey(), result);
-
-      e.getValue().clear(); // clear some RAM
-    }
-
-    return resp;
-  }
-
   /**
    * Executes a single {@link org.apache.hadoop.hbase.ipc.CoprocessorProtocol}
    * method using the registered protocol handlers.
diff --git a/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java b/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
index a46aceec409..d9941fb754a 100644
--- a/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
+++ b/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
@@ -318,7 +317,7 @@
         get.addColumn(family, qualifier);
       }
       Result result = table.get(get);
-      return ThriftUtilities.cellFromHBase(result.sorted());
+      return ThriftUtilities.cellFromHBase(result.raw());
     } catch (IOException e) {
       throw new IOError(e.getMessage());
     }
@@ -346,7 +345,7 @@
       get.addColumn(family, qualifier);
       get.setMaxVersions(numVersions);
       Result result = table.get(get);
-      return ThriftUtilities.cellFromHBase(result.sorted());
+      return ThriftUtilities.cellFromHBase(result.raw());
     } catch (IOException e) {
       throw new IOError(e.getMessage());
     }
@@ -378,7 +377,7 @@
       get.setTimeRange(Long.MIN_VALUE, timestamp);
       get.setMaxVersions(numVersions);
       Result result = table.get(get);
-      return ThriftUtilities.cellFromHBase(result.sorted());
+      return ThriftUtilities.cellFromHBase(result.raw());
     } catch (IOException e) {
       throw new IOError(e.getMessage());
     }
@@ -916,7 +915,7 @@
       try {
         HTable table = getTable(getBytes(tableName));
         Result result = table.getRowOrBefore(getBytes(row), getBytes(family));
-        return ThriftUtilities.cellFromHBase(result.sorted());
+        return ThriftUtilities.cellFromHBase(result.raw());
       } catch (IOException e) {
         throw new IOError(e.getMessage());
       }
diff --git a/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java b/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
index c77c13e094f..790f034f071 100644
--- a/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
+++ b/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
@@ -135,7 +135,7 @@ public class ThriftUtilities {
       TRowResult result = new TRowResult();
       result.row = ByteBuffer.wrap(result_.getRow());
       result.columns = new TreeMap<ByteBuffer, TCell>();
-      for(KeyValue kv : result_.sorted()) {
+      for(KeyValue kv : result_.raw()) {
         result.columns.put(
             ByteBuffer.wrap(KeyValue.makeColumn(kv.getFamily(),
                 kv.getQualifier())),
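
Each Thrift handler change is the same mechanical substitution; Result.sorted() has returned the same data as raw() since 0.20.5, so behavior is unchanged. A sketch (result is assumed to be in scope):

    KeyValue[] kvs = result.raw(); // was: result.sorted(), a deprecated alias
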
diff --git a/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
index b6a4c7a9543..51b90969e53 100644
--- a/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
+++ b/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
@@ -355,10 +355,10 @@
     Result deResult = (Result)Writables.getWritable(rb, new Result());
 
     assertTrue("results are not equivalent, first key mismatch",
-        result.sorted()[0].equals(deResult.sorted()[0]));
+        result.raw()[0].equals(deResult.raw()[0]));
 
     assertTrue("results are not equivalent, second key mismatch",
-        result.sorted()[1].equals(deResult.sorted()[1]));
+        result.raw()[1].equals(deResult.raw()[1]));
 
     // Test empty Result
     Result r = new Result();
@@ -394,9 +394,9 @@
     // Call sorted() first
     deResult = (Result)Writables.getWritable(rb, new Result());
     assertTrue("results are not equivalent, first key mismatch",
-        result.sorted()[0].equals(deResult.sorted()[0]));
+        result.raw()[0].equals(deResult.raw()[0]));
     assertTrue("results are not equivalent, second key mismatch",
-        result.sorted()[1].equals(deResult.sorted()[1]));
+        result.raw()[1].equals(deResult.raw()[1]));
 
     // Call raw() first
     deResult = (Result)Writables.getWritable(rb, new Result());
@@ -443,8 +443,8 @@
     assertTrue(results.length == deResults.length);
     for(int i=0;i