diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index abf6b1cc417..c71ee0d50e0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -110,7 +110,6 @@ public class Get extends Query
     this.storeOffset = get.getRowOffsetPerColumnFamily();
     this.tr = get.getTimeRange();
     this.checkExistenceOnly = get.isCheckExistenceOnly();
-    this.closestRowBefore = get.isClosestRowBefore();
     Map<byte[], NavigableSet<byte[]>> fams = get.getFamilyMap();
     for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
       byte [] fam = entry.getKey();
@@ -137,12 +136,23 @@ public class Get extends Query
     return this;
   }
 
+  /**
+   * Always returns the default value, which is false, because the client can no longer set
+   * this property.
+   * @deprecated since 2.0.0 and will be removed in 3.0.0
+   */
+  @Deprecated
   public boolean isClosestRowBefore() {
     return closestRowBefore;
   }
 
+  /**
+   * This is no longer used and does nothing. Use a reverse scan instead.
+   * @deprecated since 2.0.0 and will be removed in 3.0.0
+   */
+  @Deprecated
   public Get setClosestRowBefore(boolean closestRowBefore) {
-    this.closestRowBefore = closestRowBefore;
+    // Do nothing.
     return this;
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 14b721bf08f..9d46bc732c6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -937,24 +937,6 @@ public class Scan extends Query {
     return (Scan) super.setIsolationLevel(level);
   }
 
-  /**
-   * Utility that creates a Scan that will do a small scan in reverse from passed row
-   * looking for next closest row.
-   * @param row
-   * @param family
-   * @return An instance of Scan primed with passed row and family to
-   * scan in reverse for one row only.
-   */
-  static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) {
-    // Below does not work if you add in family; need to add the family qualifier that is highest
-    // possible family qualifier. Do we have such a notion? Would have to be magic.
-    Scan scan = new Scan(row);
-    scan.setSmall(true);
-    scan.setReversed(true);
-    scan.setCaching(1);
-    return scan;
-  }
-
   /**
    * Enable collection of {@link ScanMetrics}. For advanced users.
* @param enabled Set to true to enable accumulating scan metrics diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index b72f0bbd7ee..36f6fb531bf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -97,7 +97,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue; @@ -122,12 +121,12 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.protobuf.generated.WALProtos; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; -import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.quotas.QuotaScope; import org.apache.hadoop.hbase.quotas.QuotaType; @@ -489,9 +488,6 @@ public final class ProtobufUtil { if (proto.hasExistenceOnly() && proto.getExistenceOnly()){ get.setCheckExistenceOnly(true); } - if (proto.hasClosestRowBefore() && proto.getClosestRowBefore()){ - get.setClosestRowBefore(true); - } if (proto.hasConsistency()) { get.setConsistency(toConsistency(proto.getConsistency())); } @@ -1077,9 +1073,6 @@ public final class ProtobufUtil { if (get.isCheckExistenceOnly()){ builder.setExistenceOnly(true); } - if (get.isClosestRowBefore()){ - builder.setClosestRowBefore(true); - } if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) { builder.setConsistency(toConsistency(get.getConsistency())); } @@ -1549,33 +1542,6 @@ public final class ProtobufUtil { // Start helpers for Client - /** - * A helper to get a row of the closet one before using client protocol. - * - * @param client - * @param regionName - * @param row - * @param family - * @return the row or the closestRowBefore if it doesn't exist - * @throws IOException - * @deprecated since 0.99 - use reversed scanner instead. 
- */ - @Deprecated - public static Result getRowOrBefore(final ClientService.BlockingInterface client, - final byte[] regionName, final byte[] row, - final byte[] family) throws IOException { - GetRequest request = - RequestConverter.buildGetRowOrBeforeRequest( - regionName, row, family); - try { - GetResponse response = client.get(null, request); - if (!response.hasResult()) return null; - return toResult(response.getResult()); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - /** * A helper to bulk load a list of HFiles using client protocol. * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 96260fd358a..c111be24d80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionReques import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; @@ -123,35 +122,6 @@ public final class RequestConverter { // Start utilities for Client -/** - * Create a new protocol buffer GetRequest to get a row, all columns in a family. - * If there is no such row, return the closest row before it. 
- * - * @param regionName the name of the region to get - * @param row the row to get - * @param family the column family to get - * should return the immediate row before - * @return a protocol buffer GetReuqest - */ - public static GetRequest buildGetRowOrBeforeRequest( - final byte[] regionName, final byte[] row, final byte[] family) { - GetRequest.Builder builder = GetRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - - Column.Builder columnBuilder = Column.newBuilder(); - columnBuilder.setFamily(ByteStringer.wrap(family)); - ClientProtos.Get.Builder getBuilder = - ClientProtos.Get.newBuilder(); - getBuilder.setRow(ByteStringer.wrap(row)); - getBuilder.addColumn(columnBuilder.build()); - getBuilder.setClosestRowBefore(true); - builder.setGet(getBuilder.build()); - return builder.build(); - } - - /** * Create a protocol buffer GetRequest for a client Get * diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index 66a80b0ffb0..e9414406d4a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -514,14 +514,6 @@ public class TestClientNoCluster extends Configured implements Tool { ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); ByteString row = request.getGet().getRow(); Pair p = meta.get(row.toByteArray()); - if (p == null) { - if (request.getGet().getClosestRowBefore()) { - byte [] bytes = row.toByteArray(); - SortedMap> head = - bytes != null? meta.headMap(bytes): meta; - p = head == null? 
null: head.get(head.lastKey()); - } - } if (p != null) { resultBuilder.addCell(getRegionInfo(row, p.getFirst())); resultBuilder.addCell(getServer(row, p.getSecond())); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index 0e18ecf043e..f37075163fb 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -168,7 +168,6 @@ public class TestGet { get.setReplicaId(2); get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); get.setCheckExistenceOnly(true); - get.setClosestRowBefore(true); get.setTimeRange(3, 4); get.setMaxVersions(11); get.setMaxResultsPerColumnFamily(10); @@ -191,9 +190,7 @@ public class TestGet { // from Get class assertEquals(get.isCheckExistenceOnly(), copyGet.isCheckExistenceOnly()); - assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore()); assertTrue(get.getTimeRange().equals(copyGet.getTimeRange())); - assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore()); assertEquals(get.getMaxVersions(), copyGet.getMaxVersions()); assertEquals(get.getMaxResultsPerColumnFamily(), copyGet.getMaxResultsPerColumnFamily()); assertEquals(get.getRowOffsetPerColumnFamily(), copyGet.getRowOffsetPerColumnFamily()); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index c90a6252fef..c4b1eec66b6 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -1926,26 +1926,6 @@ public final class ClientProtos { */ boolean getExistenceOnly(); - // optional bool closest_row_before = 11 [default = false]; - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-     * If the row to get doesn't exist, return the
-     * closest row before.
-     * 
- */ - boolean hasClosestRowBefore(); - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-     * If the row to get doesn't exist, return the
-     * closest row before.
-     * 
- */ - boolean getClosestRowBefore(); - // optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; /** * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; @@ -1963,8 +1943,7 @@ public final class ClientProtos { ** * The protocol buffer version of Get. * Unless existence_only is specified, return all the requested data - * for the row that matches exactly, or the one that immediately - * precedes it if closest_row_before is specified. + * for the row that matches exactly. * */ public static final class Get extends @@ -2087,18 +2066,13 @@ public final class ClientProtos { existenceOnly_ = input.readBool(); break; } - case 88: { - bitField0_ |= 0x00000100; - closestRowBefore_ = input.readBool(); - break; - } case 96: { int rawValue = input.readEnum(); org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(12, rawValue); } else { - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000100; consistency_ = value; } break; @@ -2371,32 +2345,6 @@ public final class ClientProtos { return existenceOnly_; } - // optional bool closest_row_before = 11 [default = false]; - public static final int CLOSEST_ROW_BEFORE_FIELD_NUMBER = 11; - private boolean closestRowBefore_; - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-     * If the row to get doesn't exist, return the
-     * closest row before.
-     * 
- */ - public boolean hasClosestRowBefore() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-     * If the row to get doesn't exist, return the
-     * closest row before.
-     * 
- */ - public boolean getClosestRowBefore() { - return closestRowBefore_; - } - // optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; public static final int CONSISTENCY_FIELD_NUMBER = 12; private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_; @@ -2404,7 +2352,7 @@ public final class ClientProtos { * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; */ public boolean hasConsistency() { - return ((bitField0_ & 0x00000200) == 0x00000200); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; @@ -2424,7 +2372,6 @@ public final class ClientProtos { storeLimit_ = 0; storeOffset_ = 0; existenceOnly_ = false; - closestRowBefore_ = false; consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; } private byte memoizedIsInitialized = -1; @@ -2492,9 +2439,6 @@ public final class ClientProtos { output.writeBool(10, existenceOnly_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeBool(11, closestRowBefore_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeEnum(12, consistency_.getNumber()); } getUnknownFields().writeTo(output); @@ -2547,10 +2491,6 @@ public final class ClientProtos { .computeBoolSize(10, existenceOnly_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(11, closestRowBefore_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(12, consistency_.getNumber()); } @@ -2621,11 +2561,6 @@ public final class ClientProtos { result = result && (getExistenceOnly() == other.getExistenceOnly()); } - result = result && (hasClosestRowBefore() == other.hasClosestRowBefore()); - if (hasClosestRowBefore()) { - result = result && (getClosestRowBefore() - == other.getClosestRowBefore()); - } result = result && (hasConsistency() == other.hasConsistency()); if (hasConsistency()) { result = result && @@ -2684,10 +2619,6 @@ public final class ClientProtos { hash = (37 * hash) + EXISTENCE_ONLY_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getExistenceOnly()); } - if (hasClosestRowBefore()) { - hash = (37 * hash) + CLOSEST_ROW_BEFORE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getClosestRowBefore()); - } if (hasConsistency()) { hash = (37 * hash) + CONSISTENCY_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getConsistency()); @@ -2770,8 +2701,7 @@ public final class ClientProtos { ** * The protocol buffer version of Get. * Unless existence_only is specified, return all the requested data - * for the row that matches exactly, or the one that immediately - * precedes it if closest_row_before is specified. + * for the row that matches exactly. 
* */ public static final class Builder extends @@ -2849,10 +2779,8 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000100); existenceOnly_ = false; bitField0_ = (bitField0_ & ~0x00000200); - closestRowBefore_ = false; - bitField0_ = (bitField0_ & ~0x00000400); consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; - bitField0_ = (bitField0_ & ~0x00000800); + bitField0_ = (bitField0_ & ~0x00000400); return this; } @@ -2942,10 +2870,6 @@ public final class ClientProtos { if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000100; } - result.closestRowBefore_ = closestRowBefore_; - if (((from_bitField0_ & 0x00000800) == 0x00000800)) { - to_bitField0_ |= 0x00000200; - } result.consistency_ = consistency_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -3039,9 +2963,6 @@ public final class ClientProtos { if (other.hasExistenceOnly()) { setExistenceOnly(other.getExistenceOnly()); } - if (other.hasClosestRowBefore()) { - setClosestRowBefore(other.getClosestRowBefore()); - } if (other.hasConsistency()) { setConsistency(other.getConsistency()); } @@ -4029,66 +3950,13 @@ public final class ClientProtos { return this; } - // optional bool closest_row_before = 11 [default = false]; - private boolean closestRowBefore_ ; - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-       * If the row to get doesn't exist, return the
-       * closest row before.
-       * 
- */ - public boolean hasClosestRowBefore() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-       * If the row to get doesn't exist, return the
-       * closest row before.
-       * 
- */ - public boolean getClosestRowBefore() { - return closestRowBefore_; - } - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-       * If the row to get doesn't exist, return the
-       * closest row before.
-       * 
- */ - public Builder setClosestRowBefore(boolean value) { - bitField0_ |= 0x00000400; - closestRowBefore_ = value; - onChanged(); - return this; - } - /** - * optional bool closest_row_before = 11 [default = false]; - * - *
-       * If the row to get doesn't exist, return the
-       * closest row before.
-       * 
- */ - public Builder clearClosestRowBefore() { - bitField0_ = (bitField0_ & ~0x00000400); - closestRowBefore_ = false; - onChanged(); - return this; - } - // optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; /** * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; */ public boolean hasConsistency() { - return ((bitField0_ & 0x00000800) == 0x00000800); + return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; @@ -4103,7 +3971,7 @@ public final class ClientProtos { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00000400; consistency_ = value; onChanged(); return this; @@ -4112,7 +3980,7 @@ public final class ClientProtos { * optional .hbase.pb.Consistency consistency = 12 [default = STRONG]; */ public Builder clearConsistency() { - bitField0_ = (bitField0_ & ~0x00000800); + bitField0_ = (bitField0_ & ~0x00000400); consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG; onChanged(); return this; @@ -33253,135 +33121,134 @@ public final class ClientProtos { "o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" + "label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" + "ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" + - "ualifier\030\002 \003(\014\"\201\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" + + "ualifier\030\002 \003(\014\"\336\002\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" + "olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" + "te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" + "ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" + "e\030\005 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_vers", "ions\030\006 \001(\r:\0011\022\032\n\014cache_blocks\030\007 \001(\010:\004tru" + "e\022\023\n\013store_limit\030\010 \001(\r\022\024\n\014store_offset\030\t" + - " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\022!\n\022c" + - "losest_row_before\030\013 \001(\010:\005false\0222\n\013consis" + - "tency\030\014 \001(\0162\025.hbase.pb.Consistency:\006STRO" + - "NG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase.pb.C" + - "ell\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006ex" + - "ists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007part" + - "ial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006region" + - "\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032\n\003get", - "\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetResponse\022 \n\006" + - "result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\tCondi" + - "tion\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqua" + - "lifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162\025.hba" + - "se.pb.CompareType\022(\n\ncomparator\030\005 \002(\0132\024." + - "hbase.pb.Comparator\"\364\006\n\rMutationProto\022\013\n" + - "\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.hbase." 
+ - "pb.MutationProto.MutationType\0229\n\014column_" + - "value\030\003 \003(\0132#.hbase.pb.MutationProto.Col" + - "umnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattribute", - "\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\ndurab" + - "ility\030\006 \001(\0162\".hbase.pb.MutationProto.Dur" + - "ability:\013USE_DEFAULT\022\'\n\ntime_range\030\007 \001(\013" + - "2\023.hbase.pb.TimeRange\022\035\n\025associated_cell" + - "_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013ColumnVa" + - "lue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_value\030\002" + - " \003(\01322.hbase.pb.MutationProto.ColumnValu" + - "e.QualifierValue\032\214\001\n\016QualifierValue\022\021\n\tq" + - "ualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimesta" + - "mp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hbase.pb", - ".MutationProto.DeleteType\022\014\n\004tags\030\005 \001(\014\"" + - "W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_W" + - "AL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSY" + - "NC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020\000\022\r\n\t" + - "INCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDele" + - "teType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030DELETE" + - "_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMILY\020\002\022" + - "\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMutateReq" + - "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" + - "ecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.pb.Mut", - "ationProto\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" + - ".Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016Mutat" + - "eResponse\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Res" + - "ult\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006colum" + - "n\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattribute\030\002" + - " \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tstart_r" + - "ow\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filter\030\005 \001" + - "(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030\006 \001(\013" + - "2\023.hbase.pb.TimeRange\022\027\n\014max_versions\030\007 " + - "\001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022\022\n\nba", - "tch_size\030\t \001(\r\022\027\n\017max_result_size\030\n \001(\004\022" + - "\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offset\030\014 \001" + - "(\r\022&\n\036load_column_families_on_demand\030\r \001" + - "(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010:\005fal" + - "se\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb.Consi" + - "stency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n\013Scan" + - "Request\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Regio" + - "nSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan" + - "\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004" + - " \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_call", - "_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030\007 " + - "\001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010\022\032" + - "\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014ScanRespo" + - "nse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner" + - "_id\030\002 \001(\004\022\024\n\014more_results\030\003 
\001(\010\022\013\n\003ttl\030\004" + - " \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022" + - "\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_resul" + - "t\030\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022" + - "\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_metri" + - "cs\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n\024Bulk", - "LoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.hbase" + - ".pb.RegionSpecifier\022>\n\013family_path\030\002 \003(\013" + - "2).hbase.pb.BulkLoadHFileRequest.FamilyP" + - "ath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPat" + - "h\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkL" + - "oadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Copr" + - "ocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014servi" + - "ce_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007re" + - "quest\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022" + - "&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"", - "v\n\031CoprocessorServiceRequest\022)\n\006region\030\001" + - " \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030" + - "\002 \002(\0132 .hbase.pb.CoprocessorServiceCall\"" + - "o\n\032CoprocessorServiceResponse\022)\n\006region\030" + - "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005valu" + - "e\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Act" + - "ion\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.h" + - "base.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hba" + - "se.pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase." + - "pb.CoprocessorServiceCall\"k\n\014RegionActio", - "n\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpeci" + - "fier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.h" + - "base.pb.Action\"D\n\017RegionLoadStats\022\027\n\014mem" + - "storeLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(" + - "\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(" + - "\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\te" + - "xception\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022" + - ":\n\016service_result\030\004 \001(\0132\".hbase.pb.Copro" + - "cessorServiceResult\022,\n\tloadStats\030\005 \001(\0132\031" + - ".hbase.pb.RegionLoadStats\"x\n\022RegionActio", - "nResult\0226\n\021resultOrException\030\001 \003(\0132\033.hba" + - "se.pb.ResultOrException\022*\n\texception\030\002 \001" + - "(\0132\027.hbase.pb.NameBytesPair\"x\n\014MultiRequ" + - "est\022,\n\014regionAction\030\001 \003(\0132\026.hbase.pb.Reg" + - "ionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tconditi" + - "on\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRe" + - "sponse\0228\n\022regionActionResult\030\001 \003(\0132\034.hba" + - "se.pb.RegionActionResult\022\021\n\tprocessed\030\002 " + - "\001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" + - "NE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb", - ".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" + - "tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" + - ".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" + - "equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" + - "adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" + - "\037.hbase.pb.BulkLoadHFileResponse\022X\n\013Exec" + - 
"Service\022#.hbase.pb.CoprocessorServiceReq" + - "uest\032$.hbase.pb.CoprocessorServiceRespon" + - "se\022d\n\027ExecRegionServerService\022#.hbase.pb" + - ".CoprocessorServiceRequest\032$.hbase.pb.Co", - "processorServiceResponse\0228\n\005Multi\022\026.hbas" + - "e.pb.MultiRequest\032\027.hbase.pb.MultiRespon" + - "seBB\n*org.apache.hadoop.hbase.protobuf.g" + - "eneratedB\014ClientProtosH\001\210\001\001\240\001\001" + " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" + + "onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" + + "\006STRONG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase" + + ".pb.Cell\022\035\n\025associated_cell_count\030\002 \001(\005\022" + + "\016\n\006exists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n" + + "\007partial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006r" + + "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032" + + "\n\003get\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetRespons", + "e\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\t" + + "Condition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021" + + "\n\tqualifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162" + + "\025.hbase.pb.CompareType\022(\n\ncomparator\030\005 \002" + + "(\0132\024.hbase.pb.Comparator\"\364\006\n\rMutationPro" + + "to\022\013\n\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.h" + + "base.pb.MutationProto.MutationType\0229\n\014co" + + "lumn_value\030\003 \003(\0132#.hbase.pb.MutationProt" + + "o.ColumnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattr" + + "ibute\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\n", + "durability\030\006 \001(\0162\".hbase.pb.MutationProt" + + "o.Durability:\013USE_DEFAULT\022\'\n\ntime_range\030" + + "\007 \001(\0132\023.hbase.pb.TimeRange\022\035\n\025associated" + + "_cell_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013Col" + + "umnValue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_va" + + "lue\030\002 \003(\01322.hbase.pb.MutationProto.Colum" + + "nValue.QualifierValue\032\214\001\n\016QualifierValue" + + "\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\tti" + + "mestamp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hba" + + "se.pb.MutationProto.DeleteType\022\014\n\004tags\030\005", + " \001(\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010S" + + "KIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r" + + "\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020" + + "\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n" + + "\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030D" + + "ELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMI" + + "LY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMuta" + + "teRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Reg" + + "ionSpecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.p" + + "b.MutationProto\022&\n\tcondition\030\003 \001(\0132\023.hba", + "se.pb.Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016" + + "MutateResponse\022 \n\006result\030\001 \001(\0132\020.hbase.p" + + "b.Result\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006" + + "column\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattrib" + + "ute\030\002 
\003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tst" + + "art_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filte" + + "r\030\005 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030" + + "\006 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_versio" + + "ns\030\007 \001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022" + + "\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_result_size\030\n", + " \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offse" + + "t\030\014 \001(\r\022&\n\036load_column_families_on_deman" + + "d\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010" + + ":\005false\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb." + + "Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n" + + "\013ScanRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb." + + "RegionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb" + + ".Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_r" + + "ows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext" + + "_call_seq\030\006 \001(\004\022\037\n\027client_handles_partia", + "ls\030\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 " + + "\001(\010\022\032\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014Scan" + + "Response\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nsc" + + "anner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003" + + "ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Re" + + "sult\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_" + + "result\030\007 \003(\010\022\036\n\026more_results_in_region\030\010" + + " \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_" + + "metrics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n" + + "\024BulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.", + "hbase.pb.RegionSpecifier\022>\n\013family_path\030" + + "\002 \003(\0132).hbase.pb.BulkLoadHFileRequest.Fa" + + "milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" + + "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" + + "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n" + + "\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014" + + "service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" + + "\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceRe" + + "sult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytes" + + "Pair\"v\n\031CoprocessorServiceRequest\022)\n\006reg", + "ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004" + + "call\030\002 \002(\0132 .hbase.pb.CoprocessorService" + + "Call\"o\n\032CoprocessorServiceResponse\022)\n\006re" + + "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n" + + "\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001" + + "\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(" + + "\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\0132" + + "\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 .h" + + "base.pb.CoprocessorServiceCall\"k\n\014Region" + + "Action\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region", + "Specifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(" + + "\0132\020.hbase.pb.Action\"D\n\017RegionLoadStats\022\027" + + "\n\014memstoreLoad\030\001 
\001(\005:\0010\022\030\n\rheapOccupancy" + + "\030\002 \001(\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index" + + "\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result" + + "\022*\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytes" + + "Pair\022:\n\016service_result\030\004 \001(\0132\".hbase.pb." + + "CoprocessorServiceResult\022,\n\tloadStats\030\005 " + + "\001(\0132\031.hbase.pb.RegionLoadStats\"x\n\022Region" + + "ActionResult\0226\n\021resultOrException\030\001 \003(\0132", + "\033.hbase.pb.ResultOrException\022*\n\texceptio" + + "n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" + + "iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" + + "b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" + + "ndition\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMu" + + "ltiResponse\0228\n\022regionActionResult\030\001 \003(\0132" + + "\034.hbase.pb.RegionActionResult\022\021\n\tprocess" + + "ed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010T" + + "IMELINE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hba" + + "se.pb.GetRequest\032\025.hbase.pb.GetResponse\022", + ";\n\006Mutate\022\027.hbase.pb.MutateRequest\032\030.hba" + + "se.pb.MutateResponse\0225\n\004Scan\022\025.hbase.pb." + + "ScanRequest\032\026.hbase.pb.ScanResponse\022P\n\rB" + + "ulkLoadHFile\022\036.hbase.pb.BulkLoadHFileReq" + + "uest\032\037.hbase.pb.BulkLoadHFileResponse\022X\n" + + "\013ExecService\022#.hbase.pb.CoprocessorServi" + + "ceRequest\032$.hbase.pb.CoprocessorServiceR" + + "esponse\022d\n\027ExecRegionServerService\022#.hba" + + "se.pb.CoprocessorServiceRequest\032$.hbase." + + "pb.CoprocessorServiceResponse\0228\n\005Multi\022\026", + ".hbase.pb.MultiRequest\032\027.hbase.pb.MultiR" + + "esponseBB\n*org.apache.hadoop.hbase.proto" + + "buf.generatedB\014ClientProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -33411,7 +33278,7 @@ public final class ClientProtos { internal_static_hbase_pb_Get_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_Get_descriptor, - new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "ClosestRowBefore", "Consistency", }); + new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", }); internal_static_hbase_pb_Result_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_Result_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 3390de7d617..101854d6812 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -64,8 +64,7 @@ enum Consistency { /** * The protocol buffer version of Get. * Unless existence_only is specified, return all the requested data - * for the row that matches exactly, or the one that immediately - * precedes it if closest_row_before is specified. + * for the row that matches exactly. */ message Get { required bytes row = 1; @@ -82,10 +81,6 @@ message Get { // the existence. 
optional bool existence_only = 10 [default = false]; - // If the row to get doesn't exist, return the - // closest row before. - optional bool closest_row_before = 11 [default = false]; - optional Consistency consistency = 12 [default = STRONG]; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 0f1a2381ca4..172b76320cf 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -675,10 +675,6 @@ public class RemoteHTable implements Table { return true; } - public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - throw new IOException("getRowOrBefore not supported"); - } - @Override public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java index 67135467778..6ea743af1d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java @@ -115,20 +115,6 @@ public final class HTableWrapper implements Table { } } - /** - * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. - */ - @Deprecated - public Result getRowOrBefore(byte[] row, byte[] family) - throws IOException { - Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row); - Result startRowResult = null; - try (ResultScanner resultScanner = this.table.getScanner(scan)) { - startRowResult = resultScanner.next(); - } - return startRowResult; - } - public Result get(Get get) throws IOException { return table.get(get); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java index f47c9f418e3..2e6d514040d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java @@ -19,14 +19,14 @@ import java.io.IOException; import java.util.List; import java.util.NavigableSet; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; @@ -54,9 +54,9 @@ import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; -import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Pair; +import 
org.apache.hadoop.hbase.wal.WALKey; import com.google.common.collect.ImmutableList; @@ -120,7 +120,7 @@ public abstract class BaseRegionObserver implements RegionObserver { @Override public void preSplit(ObserverContext e) throws IOException { } - + @Override public void preSplit(ObserverContext c, byte[] splitRow) throws IOException { @@ -130,22 +130,22 @@ public abstract class BaseRegionObserver implements RegionObserver { public void preSplitBeforePONR(ObserverContext ctx, byte[] splitKey, List metaEntries) throws IOException { } - + @Override public void preSplitAfterPONR( ObserverContext ctx) throws IOException { } - + @Override public void preRollBackSplit(ObserverContext ctx) throws IOException { } - + @Override public void postRollBackSplit( ObserverContext ctx) throws IOException { } - + @Override public void postCompleteSplit( ObserverContext ctx) throws IOException { @@ -218,18 +218,6 @@ public abstract class BaseRegionObserver implements RegionObserver { postCompact(e, store, resultFile); } - @Override - public void preGetClosestRowBefore(final ObserverContext e, - final byte [] row, final byte [] family, final Result result) - throws IOException { - } - - @Override - public void postGetClosestRowBefore(final ObserverContext e, - final byte [] row, final byte [] family, final Result result) - throws IOException { - } - @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { @@ -253,12 +241,12 @@ public abstract class BaseRegionObserver implements RegionObserver { } @Override - public void prePut(final ObserverContext e, + public void prePut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { } @Override - public void postPut(final ObserverContext e, + public void postPut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { } @@ -278,7 +266,7 @@ public abstract class BaseRegionObserver implements RegionObserver { final Delete delete, final WALEdit edit, final Durability durability) throws IOException { } - + @Override public void preBatchMutate(final ObserverContext c, final MiniBatchOperationInProgress miniBatchOp) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index fd19edee5d9..e87a590ca90 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -382,7 +382,7 @@ public interface RegionObserver extends Coprocessor { void preSplitBeforePONR(final ObserverContext ctx, byte[] splitKey, List metaEntries) throws IOException; - + /** * This will be called after PONR step as part of split transaction * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no @@ -391,9 +391,9 @@ public interface RegionObserver extends Coprocessor { * @throws IOException */ void preSplitAfterPONR(final ObserverContext ctx) throws IOException; - + /** - * This will be called before the roll back of the split region is completed + * This will be called before the roll back of the split region is completed * @param ctx * @throws IOException */ @@ -419,7 +419,7 @@ public interface RegionObserver extends Coprocessor { * Called before the region is reported as closed to the master. 
* @param c the environment provided by the region server * @param abortRequested true if the region server is aborting - * @throws IOException + * @throws IOException */ void preClose(final ObserverContext c, boolean abortRequested) throws IOException; @@ -432,40 +432,6 @@ public interface RegionObserver extends Coprocessor { void postClose(final ObserverContext c, boolean abortRequested); - /** - * Called before a client makes a GetClosestRowBefore request. - *

- * Call CoprocessorEnvironment#bypass to skip default actions - *

- * Call CoprocessorEnvironment#complete to skip any subsequent chained - * coprocessors - * @param c the environment provided by the region server - * @param row the row - * @param family the family - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be used if default processing - * is not bypassed. - * @throws IOException if an error occurred on the coprocessor - */ - void preGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException; - - /** - * Called after a client makes a GetClosestRowBefore request. - *

- * Call CoprocessorEnvironment#complete to skip any subsequent chained - * coprocessors - * @param c the environment provided by the region server - * @param row the row - * @param family the desired family - * @param result the result to return to the client, modify as necessary - * @throws IOException if an error occurred on the coprocessor - */ - void postGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException; - /** * Called before the client performs a Get *

@@ -543,7 +509,7 @@ public interface RegionObserver extends Coprocessor { * @param durability Persistence guarantee for this Put * @throws IOException if an error occurred on the coprocessor */ - void prePut(final ObserverContext c, + void prePut(final ObserverContext c, final Put put, final WALEdit edit, final Durability durability) throws IOException; @@ -558,7 +524,7 @@ public interface RegionObserver extends Coprocessor { * @param durability Persistence guarantee for this Put * @throws IOException if an error occurred on the coprocessor */ - void postPut(final ObserverContext c, + void postPut(final ObserverContext c, final Put put, final WALEdit edit, final Durability durability) throws IOException; @@ -575,7 +541,7 @@ public interface RegionObserver extends Coprocessor { * @param durability Persistence guarantee for this Delete * @throws IOException if an error occurred on the coprocessor */ - void preDelete(final ObserverContext c, + void preDelete(final ObserverContext c, final Delete delete, final WALEdit edit, final Durability durability) throws IOException; /** @@ -611,7 +577,7 @@ public interface RegionObserver extends Coprocessor { void postDelete(final ObserverContext c, final Delete delete, final WALEdit edit, final Durability durability) throws IOException; - + /** * This will be called for every batch mutation operation happening at the server. This will be * called after acquiring the locks on the mutating rows and after applying the proper timestamp @@ -658,7 +624,7 @@ public interface RegionObserver extends Coprocessor { * Called after the completion of batch put/delete and will be called even if the batch operation * fails * @param ctx - * @param miniBatchOp + * @param miniBatchOp * @param success true if batch operation is successful otherwise false. * @throws IOException */ @@ -679,7 +645,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param put data to put if check succeeds - * @param result + * @param result * @return the return value to return to client if bypassing default * processing * @throws IOException if an error occurred on the coprocessor @@ -693,8 +659,8 @@ public interface RegionObserver extends Coprocessor { /** * Called before checkAndPut but after acquiring rowlock. *

- * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, + * Note: Caution to be taken for not doing any long time operation in this hook. + * Row will be locked for longer time. Trying to acquire lock on another row, within this, * can lead to potential deadlock. *

* Call CoprocessorEnvironment#bypass to skip default actions @@ -708,14 +674,14 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param put data to put if check succeeds - * @param result + * @param result * @return the return value to return to client if bypassing default * processing * @throws IOException if an error occurred on the coprocessor */ boolean preCheckAndPutAfterRowLock(final ObserverContext c, final byte[] row, final byte[] family, final byte[] qualifier, final CompareOp compareOp, - final ByteArrayComparable comparator, final Put put, + final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException; /** @@ -754,7 +720,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param delete delete to commit if check succeeds - * @param result + * @param result * @return the value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -767,8 +733,8 @@ public interface RegionObserver extends Coprocessor { /** * Called before checkAndDelete but after acquiring rowock. *

- * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, + * Note: Caution to be taken for not doing any long time operation in this hook. + * Row will be locked for longer time. Trying to acquire lock on another row, within this, * can lead to potential deadlock. *

* Call CoprocessorEnvironment#bypass to skip default actions @@ -782,7 +748,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param delete delete to commit if check succeeds - * @param result + * @param result * @return the value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -877,8 +843,8 @@ public interface RegionObserver extends Coprocessor { /** * Called before Append but after acquiring rowlock. *

- * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, + * Note: Caution to be taken for not doing any long time operation in this hook. + * Row will be locked for longer time. Trying to acquire lock on another row, within this, * can lead to potential deadlock. *

* Call CoprocessorEnvironment#bypass to skip default actions @@ -927,14 +893,14 @@ public interface RegionObserver extends Coprocessor { /** * Called before Increment but after acquiring rowlock. *

- * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, + * Note: Caution to be taken for not doing any long time operation in this hook. + * Row will be locked for longer time. Trying to acquire lock on another row, within this, * can lead to potential deadlock. *

* Call CoprocessorEnvironment#bypass to skip default actions *

* Call CoprocessorEnvironment#complete to skip any subsequent chained coprocessors - * + * * @param c * the environment provided by the region server * @param increment @@ -1227,7 +1193,7 @@ public interface RegionObserver extends Coprocessor { * Called before creation of Reader for a store file. * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no * effect in this hook. - * + * * @param ctx the environment provided by the region server * @param fs fileystem to read from * @param p path to the file @@ -1246,7 +1212,7 @@ public interface RegionObserver extends Coprocessor { /** * Called after the creation of Reader for a store file. - * + * * @param ctx the environment provided by the region server * @param fs fileystem to read from * @param p path to the file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 161e4b63bfa..cc8c3a82ae6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -31,15 +31,14 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.Bytes; @@ -384,85 +383,6 @@ public class DefaultMemStore implements MemStore { return result; } - /** - * @param state column/delete tracking state - */ - @Override - public void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state) { - getRowKeyAtOrBefore(cellSet, state); - getRowKeyAtOrBefore(snapshot, state); - } - - /* - * @param set - * @param state Accumulates deletes and candidates. - */ - private void getRowKeyAtOrBefore(final NavigableSet set, - final GetClosestRowBeforeTracker state) { - if (set.isEmpty()) { - return; - } - if (!walkForwardInSingleRow(set, state.getTargetKey(), state)) { - // Found nothing in row. Try backing up. - getRowKeyBefore(set, state); - } - } - - /* - * Walk forward in a row from firstOnRow. Presumption is that - * we have been passed the first possible key on a row. As we walk forward - * we accumulate deletes until we hit a candidate on the row at which point - * we return. - * @param set - * @param firstOnRow First possible key on this row. - * @param state - * @return True if we found a candidate walking this row. - */ - private boolean walkForwardInSingleRow(final SortedSet set, - final Cell firstOnRow, final GetClosestRowBeforeTracker state) { - boolean foundCandidate = false; - SortedSet tail = set.tailSet(firstOnRow); - if (tail.isEmpty()) return foundCandidate; - for (Iterator i = tail.iterator(); i.hasNext();) { - Cell kv = i.next(); - // Did we go beyond the target row? If so break. 
- if (state.isTooFar(kv, firstOnRow)) break; - if (state.isExpired(kv)) { - i.remove(); - continue; - } - // If we added something, this row is a contender. break. - if (state.handle(kv)) { - foundCandidate = true; - break; - } - } - return foundCandidate; - } - - /* - * Walk backwards through the passed set a row at a time until we run out of - * set or until we get a candidate. - * @param set - * @param state - */ - private void getRowKeyBefore(NavigableSet set, - final GetClosestRowBeforeTracker state) { - Cell firstOnRow = state.getTargetKey(); - for (Member p = memberOfPreviousRow(set, state, firstOnRow); - p != null; p = memberOfPreviousRow(p.set, state, firstOnRow)) { - // Make sure we don't fall out of our table. - if (!state.isTargetTable(p.cell)) break; - // Stop looking if we've exited the better candidate range. - if (!state.isBetterCandidate(p.cell)) break; - // Make into firstOnRow - firstOnRow = new KeyValue(p.cell.getRowArray(), p.cell.getRowOffset(), p.cell.getRowLength(), - HConstants.LATEST_TIMESTAMP); - // If we find something, break; - if (walkForwardInSingleRow(p.set, firstOnRow, state)) break; - } - } - /** * Only used by tests. TODO: Remove * @@ -622,42 +542,6 @@ public class DefaultMemStore implements MemStore { return addedSize; } - /* - * Immutable data structure to hold member found in set and the set it was - * found in. Include set because it is carrying context. - */ - private static class Member { - final Cell cell; - final NavigableSet set; - Member(final NavigableSet s, final Cell kv) { - this.cell = kv; - this.set = s; - } - } - - /* - * @param set Set to walk back in. Pass a first in row or we'll return - * same row (loop). - * @param state Utility and context. - * @param firstOnRow First item on the row after the one we want to find a - * member in. - * @return Null or member of row previous to firstOnRow - */ - private Member memberOfPreviousRow(NavigableSet set, - final GetClosestRowBeforeTracker state, final Cell firstOnRow) { - NavigableSet head = set.headSet(firstOnRow, false); - if (head.isEmpty()) return null; - for (Iterator i = head.descendingIterator(); i.hasNext();) { - Cell found = i.next(); - if (state.isExpired(found)) { - i.remove(); - continue; - } - return new Member(head, found); - } - return null; - } - /** * @return scanner on memstore and snapshot in this order. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java deleted file mode 100644 index 2df42861dfd..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import java.util.NavigableMap; -import java.util.NavigableSet; -import java.util.TreeMap; -import java.util.TreeSet; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}. - * Like {@link ScanQueryMatcher} and {@link ScanDeleteTracker} but does not - * implement the {@link DeleteTracker} interface since state spans rows (There - * is no update nor reset method). - */ -@InterfaceAudience.Private -class GetClosestRowBeforeTracker { - private final KeyValue targetkey; - // Any cell w/ a ts older than this is expired. - private final long now; - private final long oldestUnexpiredTs; - private Cell candidate = null; - private final CellComparator cellComparator; - // Flag for whether we're doing getclosest on a metaregion. - private final boolean metaregion; - // Offset and length into targetkey demarking table name (if in a metaregion). - private final int rowoffset; - private final int tablenamePlusDelimiterLength; - - // Deletes keyed by row. Comparator compares on row portion of KeyValue only. - private final NavigableMap> deletes; - - /** - * @param c - * @param kv Presume first on row: i.e. empty column, maximum timestamp and - * a type of Type.Maximum - * @param ttl Time to live in ms for this Store - * @param metaregion True if this is hbase:meta or -ROOT- region. - */ - GetClosestRowBeforeTracker(final CellComparator c, final KeyValue kv, - final long ttl, final boolean metaregion) { - super(); - this.metaregion = metaregion; - this.targetkey = kv; - // If we are in a metaregion, then our table name is the prefix on the - // targetkey. - this.rowoffset = kv.getRowOffset(); - int l = -1; - if (metaregion) { - l = Bytes.searchDelimiterIndex(kv.getRowArray(), rowoffset, kv.getRowLength(), - HConstants.DELIMITER) - this.rowoffset; - } - this.tablenamePlusDelimiterLength = metaregion? l + 1: -1; - this.now = System.currentTimeMillis(); - this.oldestUnexpiredTs = now - ttl; - this.cellComparator = c; - this.deletes = new TreeMap>(new CellComparator.RowComparator()); - } - - /* - * Add the specified KeyValue to the list of deletes. - * @param kv - */ - private void addDelete(final Cell kv) { - NavigableSet rowdeletes = this.deletes.get(kv); - if (rowdeletes == null) { - rowdeletes = new TreeSet(this.cellComparator); - this.deletes.put(kv, rowdeletes); - } - rowdeletes.add(kv); - } - - /* - * @param kv Adds candidate if nearer the target than previous candidate. - * @return True if updated candidate. - */ - private boolean addCandidate(final Cell kv) { - if (!isDeleted(kv) && isBetterCandidate(kv)) { - this.candidate = kv; - return true; - } - return false; - } - - boolean isBetterCandidate(final Cell contender) { - return this.candidate == null || - (this.cellComparator.compareRows(this.candidate, contender) < 0 && - this.cellComparator.compareRows(contender, this.targetkey) <= 0); - } - - /* - * Check if specified KeyValue buffer has been deleted by a previously - * seen delete. 
- * @param kv - * @return true is the specified KeyValue is deleted, false if not - */ - private boolean isDeleted(final Cell kv) { - if (this.deletes.isEmpty()) return false; - NavigableSet rowdeletes = this.deletes.get(kv); - if (rowdeletes == null || rowdeletes.isEmpty()) return false; - return isDeleted(kv, rowdeletes); - } - - /** - * Check if the specified KeyValue buffer has been deleted by a previously - * seen delete. - * @param kv - * @param ds - * @return True is the specified KeyValue is deleted, false if not - */ - public boolean isDeleted(final Cell kv, final NavigableSet ds) { - if (deletes == null || deletes.isEmpty()) return false; - for (Cell d: ds) { - long kvts = kv.getTimestamp(); - long dts = d.getTimestamp(); - if (CellUtil.isDeleteFamily(d)) { - if (kvts <= dts) return true; - continue; - } - // Check column - int ret = CellComparator.compareQualifiers(kv, d); - if (ret <= -1) { - // This delete is for an earlier column. - continue; - } else if (ret >= 1) { - // Beyond this kv. - break; - } - // Check Timestamp - if (kvts > dts) return false; - - // Check Type - switch (KeyValue.Type.codeToType(d.getTypeByte())) { - case Delete: return kvts == dts; - case DeleteColumn: return true; - default: continue; - } - } - return false; - } - - /** - * @param cell - * @return true if the cell is expired - */ - public boolean isExpired(final Cell cell) { - return cell.getTimestamp() < this.oldestUnexpiredTs || - HStore.isCellTTLExpired(cell, this.oldestUnexpiredTs, this.now); - } - - /* - * Handle keys whose values hold deletes. - * Add to the set of deletes and then if the candidate keys contain any that - * might match, then check for a match and remove it. Implies candidates - * is made with a Comparator that ignores key type. - * @param kv - * @return True if we removed k from candidates. - */ - boolean handleDeletes(final Cell kv) { - addDelete(kv); - boolean deleted = false; - if (!hasCandidate()) return deleted; - if (isDeleted(this.candidate)) { - this.candidate = null; - deleted = true; - } - return deleted; - } - - /** - * Do right thing with passed key, add to deletes or add to candidates. - * @param kv - * @return True if we added a candidate - */ - boolean handle(final Cell kv) { - if (CellUtil.isDelete(kv)) { - handleDeletes(kv); - return false; - } - return addCandidate(kv); - } - - /** - * @return True if has candidate - */ - public boolean hasCandidate() { - return this.candidate != null; - } - - /** - * @return Best candidate or null. - */ - public Cell getCandidate() { - return this.candidate; - } - - public KeyValue getTargetKey() { - return this.targetkey; - } - - /** - * @param kv Current kv - * @param firstOnRow on row kv. - * @return True if we went too far, past the target key. - */ - boolean isTooFar(final Cell kv, final Cell firstOnRow) { - return this.cellComparator.compareRows(kv, firstOnRow) > 0; - } - - boolean isTargetTable(final Cell kv) { - if (!metaregion) return true; - // Compare start of keys row. Compare including delimiter. Saves having - // to calculate where tablename ends in the candidate kv. 
- return Bytes.compareTo(this.targetkey.getRowArray(), this.rowoffset, - this.tablenamePlusDelimiterLength, - kv.getRowArray(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index d4e89e05961..af4271a9e5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -2430,38 +2430,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // get() methods for client use. ////////////////////////////////////////////////////////////////////////////// - @Override - public Result getClosestRowBefore(final byte [] row, final byte [] family) throws IOException { - if (coprocessorHost != null) { - Result result = new Result(); - if (coprocessorHost.preGetClosestRowBefore(row, family, result)) { - return result; - } - } - // look across all the HStores for this region and determine what the - // closest key is across all column families, since the data may be sparse - checkRow(row, "getClosestRowBefore"); - startRegionOperation(Operation.GET); - this.readRequestsCount.increment(); - try { - Store store = getStore(family); - // get the closest key. (HStore.getRowKeyAtOrBefore can return null) - Cell key = store.getRowKeyAtOrBefore(row); - Result result = null; - if (key != null) { - Get get = new Get(CellUtil.cloneRow(key)); - get.addFamily(family); - result = get(get); - } - if (coprocessorHost != null) { - coprocessorHost.postGetClosestRowBefore(row, family, result); - } - return result; - } finally { - closeRegionOperation(Operation.GET); - } - } - @Override public RegionScanner getScanner(Scan scan) throws IOException { return getScanner(scan, null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 57ca3f12873..07d51c09f80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1774,154 +1774,6 @@ public class HStore implements Store { return false; } - @Override - public Cell getRowKeyAtOrBefore(final byte[] row) throws IOException { - // If minVersions is set, we will not ignore expired KVs. - // As we're only looking for the latest matches, that should be OK. - // With minVersions > 0 we guarantee that any KV that has any version - // at all (expired or not) has at least one version that will not expire. - // Note that this method used to take a KeyValue as arguments. KeyValue - // can be back-dated, a row key cannot. - long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.scanInfo.getTtl(); - - KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP); - - GetClosestRowBeforeTracker state = new GetClosestRowBeforeTracker( - this.comparator, kv, ttlToUse, this.getRegionInfo().isMetaRegion()); - this.lock.readLock().lock(); - try { - // First go to the memstore. Pick up deletes and candidates. - this.memstore.getRowKeyAtOrBefore(state); - // Check if match, if we got a candidate on the asked for 'kv' row. - // Process each relevant store file. Run through from newest to oldest. 
- Iterator sfIterator = this.storeEngine.getStoreFileManager() - .getCandidateFilesForRowKeyBefore(state.getTargetKey()); - while (sfIterator.hasNext()) { - StoreFile sf = sfIterator.next(); - sfIterator.remove(); // Remove sf from iterator. - boolean haveNewCandidate = rowAtOrBeforeFromStoreFile(sf, state); - Cell candidate = state.getCandidate(); - // we have an optimization here which stops the search if we find exact match. - if (candidate != null && CellUtil.matchingRow(candidate, row)) { - return candidate; - } - if (haveNewCandidate) { - sfIterator = this.storeEngine.getStoreFileManager().updateCandidateFilesForRowKeyBefore( - sfIterator, state.getTargetKey(), candidate); - } - } - return state.getCandidate(); - } finally { - this.lock.readLock().unlock(); - } - } - - /* - * Check an individual MapFile for the row at or before a given row. - * @param f - * @param state - * @throws IOException - * @return True iff the candidate has been updated in the state. - */ - private boolean rowAtOrBeforeFromStoreFile(final StoreFile f, - final GetClosestRowBeforeTracker state) - throws IOException { - StoreFile.Reader r = f.getReader(); - if (r == null) { - LOG.warn("StoreFile " + f + " has a null Reader"); - return false; - } - if (r.getEntries() == 0) { - LOG.warn("StoreFile " + f + " is a empty store file"); - return false; - } - // TODO: Cache these keys rather than make each time? - Cell firstKV = r.getFirstKey(); - if (firstKV == null) return false; - Cell lastKV = r.getLastKey(); - Cell firstOnRow = state.getTargetKey(); - if (this.comparator.compareRows(lastKV, firstOnRow) < 0) { - // If last key in file is not of the target table, no candidates in this - // file. Return. - if (!state.isTargetTable(lastKV)) return false; - // If the row we're looking for is past the end of file, set search key to - // last key. TODO: Cache last and first key rather than make each time. - firstOnRow = CellUtil.createFirstOnRow(lastKV); - } - // Get a scanner that caches blocks and that uses pread. - HFileScanner scanner = r.getScanner(true, true, false); - // Seek scanner. If can't seek it, return. - if (!seekToScanner(scanner, firstOnRow, firstKV)) return false; - // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN! - // Unlikely that there'll be an instance of actual first row in table. - if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true; - // If here, need to start backing up. - while (scanner.seekBefore(firstOnRow)) { - Cell kv = scanner.getCell(); - if (!state.isTargetTable(kv)) break; - if (!state.isBetterCandidate(kv)) break; - // Make new first on row. - firstOnRow = CellUtil.createFirstOnRow(kv); - // Seek scanner. If can't seek it, break. - if (!seekToScanner(scanner, firstOnRow, firstKV)) return false; - // If we find something, break; - if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true; - } - return false; - } - - /* - * Seek the file scanner to firstOnRow or first entry in file. - * @param scanner - * @param firstOnRow - * @param firstKV - * @return True if we successfully seeked scanner. 
- * @throws IOException - */ - private boolean seekToScanner(final HFileScanner scanner, - final Cell firstOnRow, - final Cell firstKV) - throws IOException { - Cell kv = firstOnRow; - // If firstOnRow < firstKV, set to firstKV - if (this.comparator.compareRows(firstKV, firstOnRow) == 0) kv = firstKV; - int result = scanner.seekTo(kv); - return result != -1; - } - - /* - * When we come in here, we are probably at the kv just before we break into - * the row that firstOnRow is on. Usually need to increment one time to get - * on to the row we are interested in. - * @param scanner - * @param firstOnRow - * @param state - * @return True we found a candidate. - * @throws IOException - */ - private boolean walkForwardInSingleRow(final HFileScanner scanner, - final Cell firstOnRow, - final GetClosestRowBeforeTracker state) - throws IOException { - boolean foundCandidate = false; - do { - Cell kv = scanner.getCell(); - // If we are not in the row, skip. - if (this.comparator.compareRows(kv, firstOnRow) < 0) continue; - // Did we go beyond the target row? If so break. - if (state.isTooFar(kv, firstOnRow)) break; - if (state.isExpired(kv)) { - continue; - } - // If we added something, this row is a contender. break. - if (state.handle(kv)) { - foundCandidate = true; - break; - } - } while(scanner.next()); - return foundCandidate; - } - @Override public boolean canSplit() { this.lock.readLock().lock(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 364b9c9b639..d24299db326 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -92,13 +92,6 @@ public interface MemStore extends HeapSize { */ long delete(final Cell deleteCell); - /** - * Find the key that matches row exactly, or the one that immediately precedes it. The - * target row key is set in state. - * @param state column/delete tracking state - */ - void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state); - /** * Given the specs of a column, update it, first by inserting a new record, * then removing the old one. 
Since there is only 1 KeyValue involved, the memstoreTS diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index d7be4b49114..5f3108660c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1933,32 +1933,21 @@ public class RSRpcServices implements HBaseRPCErrorHandler, quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.GET); - if (get.hasClosestRowBefore() && get.getClosestRowBefore()) { - if (get.getColumnCount() != 1) { - throw new DoNotRetryIOException( - "get ClosestRowBefore supports one and only one family now, not " - + get.getColumnCount() + " families"); - } - byte[] row = get.getRow().toByteArray(); - byte[] family = get.getColumn(0).getFamily().toByteArray(); - r = region.getClosestRowBefore(row, family); - } else { - Get clientGet = ProtobufUtil.toGet(get); - if (get.getExistenceOnly() && region.getCoprocessorHost() != null) { - existence = region.getCoprocessorHost().preExists(clientGet); - } - if (existence == null) { - r = region.get(clientGet); - if (get.getExistenceOnly()) { - boolean exists = r.getExists(); - if (region.getCoprocessorHost() != null) { - exists = region.getCoprocessorHost().postExists(clientGet, exists); - } - existence = exists; + Get clientGet = ProtobufUtil.toGet(get); + if (get.getExistenceOnly() && region.getCoprocessorHost() != null) { + existence = region.getCoprocessorHost().preExists(clientGet); + } + if (existence == null) { + r = region.get(clientGet); + if (get.getExistenceOnly()) { + boolean exists = r.getExists(); + if (region.getCoprocessorHost() != null) { + exists = region.getCoprocessorHost().postExists(clientGet, exists); } + existence = exists; } } - if (existence != null){ + if (existence != null) { ClientProtos.Result pbr = ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0); builder.setResult(pbr); @@ -1974,8 +1963,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, throw new ServiceException(ie); } finally { if (regionServer.metricsRegionServer != null) { - regionServer.metricsRegionServer.updateGet( - EnvironmentEdgeManager.currentTime() - before); + regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before); } if (quota != null) { quota.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 9da99ab9b8d..6d87057b5d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -380,17 +380,6 @@ public interface Region extends ConfigurationObserver { */ List get(Get get, boolean withCoprocessor) throws IOException; - /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before - * ts. - * @param row - * @param family - * @return result of the operation - * @throws IOException - */ - Result getClosestRowBefore(byte[] row, byte[] family) throws IOException; - /** * Return an iterator that scans over the HRegion, returning the indicated * columns and rows specified by the {@link Scan}. 
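The rest of the patch removes the remaining server-side pieces of the closest-row-before path (coprocessor hooks, Store/MemStore support, the Thrift endpoint) and, wherever a caller still needs the behaviour, swaps in a small reversed scan, the same pattern used by the getReverseScanResult and getClosestRowBefore helpers added further down in TestFromClientSide, ThriftServerRunner and HBaseTestingUtility. As a rough sketch of what that migration looks like for client code that used to rely on Get#setClosestRowBefore(true), assuming an ordinary Table handle (the class and method names below are illustrative, not part of the patch):

// Illustrative sketch (not part of this patch): emulate the removed
// closest-row-before lookup with a small reversed scan.
import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ClosestRowBeforeMigration {
  /**
   * Returns the row that matches the passed row exactly, or the one that
   * immediately precedes it, restricted to the given family; null if no
   * such row exists.
   */
  static Result closestRowBefore(Table table, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan(row);   // start (inclusive) at the probe row
    scan.setReversed(true);      // walk backwards from the probe row
    scan.setSmall(true);         // single-RPC small-scan optimization
    scan.setCaching(1);          // only the first result is needed
    scan.addFamily(family);
    try (ResultScanner scanner = table.getScanner(scan)) {
      return scanner.next();     // first row at or before the probe row, or null
    }
  }
}

Unlike the removed Region#getClosestRowBefore, which RSRpcServices restricted to exactly one column family, this is an ordinary reversed scan, so a caller that needs filters or several families can add them to the same Scan.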
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 85d4b6431ba..13b83170a7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -782,41 +782,6 @@ public class RegionCoprocessorHost // RegionObserver support - /** - * @param row the row key - * @param family the family - * @param result the result set from the region - * @return true if default processing should be bypassed - * @exception IOException Exception - */ - public boolean preGetClosestRowBefore(final byte[] row, final byte[] family, - final Result result) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preGetClosestRowBefore(ctx, row, family, result); - } - }); - } - - /** - * @param row the row key - * @param family the family - * @param result the result set from the region - * @exception IOException Exception - */ - public void postGetClosestRowBefore(final byte[] row, final byte[] family, - final Result result) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postGetClosestRowBefore(ctx, row, family, result); - } - }); - } - /** * @param get the Get request * @return true if default processing should be bypassed diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 46fce673cda..edc166ebba5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -143,20 +143,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf */ void rollback(final Cell cell); - /** - * Find the key that matches row exactly, or the one that immediately precedes it. WARNING: - * Only use this method on a table where writes occur with strictly increasing timestamps. This - * method assumes this pattern of writes in order to make it reasonably performant. Also our - * search is dependent on the axiom that deletes are for cells that are in the container that - * follows whether a memstore snapshot or a storefile, not for the current container: i.e. we'll - * see deletes before we come across cells we are to delete. Presumption is that the - * memstore#kvset is processed before memstore#snapshot and so on. - * @param row The row key of the targeted row. - * @return Found Cell or null if none found. 
- * @throws IOException - */ - Cell getRowKeyAtOrBefore(final byte[] row) throws IOException; - FileSystem getFileSystem(); /* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 8bd69a29f47..275d4dadb07 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -661,7 +661,6 @@ public class AccessController extends BaseMasterAndRegionObserver } private enum OpType { - GET_CLOSEST_ROW_BEFORE("getClosestRowBefore"), GET("get"), EXISTS("exists"), SCAN("scan"), @@ -1424,28 +1423,6 @@ public class AccessController extends BaseMasterAndRegionObserver return scanner; } - @Override - public void preGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException { - assert family != null; - RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = makeFamilyMap(family, null); - User user = getActiveUser(); - AuthResult authResult = permissionGranted(OpType.GET_CLOSEST_ROW_BEFORE, user, env, families, - Action.READ); - if (!authResult.isAllowed() && cellFeaturesEnabled && !compatibleEarlyTermination) { - authResult.setAllowed(checkCoveringPermission(OpType.GET_CLOSEST_ROW_BEFORE, env, row, - families, HConstants.LATEST_TIMESTAMP, Action.READ)); - authResult.setReason("Covering cell set"); - } - logResult(authResult); - if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); - } - } - private void internalPreRead(final ObserverContext c, final Query query, OpType opType) throws IOException { Filter filter = query.getFilter(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index feec5f8dc60..86b6c35b3de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; @@ -4155,4 +4156,28 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]); } + + public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException { + Scan scan = new Scan(row); + scan.setSmall(true); + scan.setCaching(1); + scan.setReversed(true); + scan.addFamily(family); + try (RegionScanner scanner = r.getScanner(scan)) { + List cells = new ArrayList(1); + scanner.next(cells); + if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) { + return null; + } + return Result.create(cells); + } + } + + private boolean isTargetTable(final byte[] inRow, Cell c) { + String inputRowString = Bytes.toString(inRow); + int i = 
inputRowString.indexOf(HConstants.DELIMITER); + String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength()); + int o = outputRowString.indexOf(HConstants.DELIMITER); + return inputRowString.substring(0, i).equals(outputRowString.substring(0, o)); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 5ab7424bccf..6dc4394cfbf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -4209,83 +4209,56 @@ public class TestFromClientSide { region.flush(true); Result result; - Get get = null; // Test before first that null is returned - get = new Get(beforeFirstRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); - assertTrue(result.isEmpty()); + result = getReverseScanResult(table, beforeFirstRow, + HConstants.CATALOG_FAMILY); + assertNull(result); // Test at first that first is returned - get = new Get(firstRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + result = getReverseScanResult(table, firstRow, HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), firstRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); // Test in between first and second that first is returned - get = new Get(beforeSecondRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + result = getReverseScanResult(table, beforeSecondRow, HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), firstRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); // Test at second make sure second is returned - get = new Get(secondRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + result = getReverseScanResult(table, secondRow, HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), secondRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); // Test in second and third, make sure second is returned - get = new Get(beforeThirdRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + result = getReverseScanResult(table, beforeThirdRow, HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), secondRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); // Test at third make sure third is returned - get = new Get(thirdRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + result = getReverseScanResult(table, thirdRow, HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), thirdRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); // Test in third and forth, make sure third is returned - get = new Get(beforeForthRow); - 
get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + result = getReverseScanResult(table, beforeForthRow, HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), thirdRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); // Test at forth make sure forth is returned - get = new Get(forthRow); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + result = getReverseScanResult(table, forthRow, HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), forthRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); // Test after forth make sure forth is returned - get = new Get(Bytes.add(forthRow, one)); - get.setClosestRowBefore(true); - get.addFamily(HConstants.CATALOG_FAMILY); - result = table.get(get); + result = getReverseScanResult(table, Bytes.add(forthRow, one), HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); assertTrue(Bytes.equals(result.getRow(), forthRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); @@ -4293,6 +4266,17 @@ public class TestFromClientSide { } } + private Result getReverseScanResult(Table table, byte[] row, byte[] fam) throws IOException { + Scan scan = new Scan(row); + scan.setSmall(true); + scan.setReversed(true); + scan.setCaching(1); + scan.addFamily(fam); + try (ResultScanner scanner = table.getScanner(scan)) { + return scanner.next(); + } + } + /** * For HBASE-2156 * @throws Exception diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java index d92d3015c7a..dbb1cd144f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java @@ -85,7 +85,6 @@ public class TestFromClientSideNoCodec { // Check getRowOrBefore byte[] f = fs[0]; Get get = new Get(row); - get.setClosestRowBefore(true); get.addFamily(f); r = ht.get(get); assertTrue(r.toString(), r.containsColumn(f, f)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index 6707354976c..14b43df893a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -102,8 +102,6 @@ public class SimpleRegionObserver extends BaseRegionObserver { final AtomicInteger ctPreDeleted = new AtomicInteger(0); final AtomicInteger ctPrePrepareDeleteTS = new AtomicInteger(0); final AtomicInteger ctPostDeleted = new AtomicInteger(0); - final AtomicInteger ctPreGetClosestRowBefore = new AtomicInteger(0); - final AtomicInteger ctPostGetClosestRowBefore = new AtomicInteger(0); final AtomicInteger ctPreIncrement = new AtomicInteger(0); final AtomicInteger ctPreIncrementAfterRowLock = new AtomicInteger(0); final AtomicInteger ctPreAppend = new AtomicInteger(0); @@ -517,32 +515,6 @@ public class SimpleRegionObserver extends BaseRegionObserver { 
ctPostBatchMutateIndispensably.incrementAndGet(); } - @Override - public void preGetClosestRowBefore(final ObserverContext c, - final byte[] row, final byte[] family, final Result result) - throws IOException { - RegionCoprocessorEnvironment e = c.getEnvironment(); - assertNotNull(e); - assertNotNull(e.getRegion()); - assertNotNull(row); - assertNotNull(result); - if (ctBeforeDelete.get() > 0) { - ctPreGetClosestRowBefore.incrementAndGet(); - } - } - - @Override - public void postGetClosestRowBefore(final ObserverContext c, - final byte[] row, final byte[] family, final Result result) - throws IOException { - RegionCoprocessorEnvironment e = c.getEnvironment(); - assertNotNull(e); - assertNotNull(e.getRegion()); - assertNotNull(row); - assertNotNull(result); - ctPostGetClosestRowBefore.incrementAndGet(); - } - @Override public Result preIncrement(final ObserverContext c, final Increment increment) throws IOException { @@ -940,14 +912,6 @@ public class SimpleRegionObserver extends BaseRegionObserver { return ctPostDeleted.get(); } - public int getCtPreGetClosestRowBefore() { - return ctPreGetClosestRowBefore.get(); - } - - public int getCtPostGetClosestRowBefore() { - return ctPostGetClosestRowBefore.get(); - } - public int getCtPreIncrement() { return ctPreIncrement.get(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 418e2fca645..470a453a489 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; @@ -66,6 +65,8 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { private static final byte[] T35 = Bytes.toBytes("035"); private static final byte[] T40 = Bytes.toBytes("040"); + private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); + @Test @@ -160,7 +161,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { tableb, tofindBytes, HConstants.NINES, false); LOG.info("find=" + new String(metaKey)); - Result r = mr.getClosestRowBefore(metaKey, HConstants.CATALOG_FAMILY); + Result r = UTIL.getClosestRowBefore(mr, metaKey, HConstants.CATALOG_FAMILY); if (answer == -1) { assertNull(r); return null; @@ -206,38 +207,38 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { p.add(c0, c0, T20); region.put(p); - Result r = region.getClosestRowBefore(T20, c0); + Result r = UTIL.getClosestRowBefore(region, T20, c0); assertTrue(Bytes.equals(T20, r.getRow())); Delete d = new Delete(T20); d.deleteColumn(c0, c0); region.delete(d); - r = region.getClosestRowBefore(T20, c0); + r = UTIL.getClosestRowBefore(region, T20, c0); assertTrue(Bytes.equals(T10, r.getRow())); p = new Put(T30); p.add(c0, c0, T30); region.put(p); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T30, r.getRow())); d = new Delete(T30); d.deleteColumn(c0, c0); region.delete(d); - r = region.getClosestRowBefore(T30, c0); + r = 
UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); region.flush(true); // try finding "010" after flush - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); // Put into a different column family. Should make it so I still get t10 @@ -245,16 +246,16 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { p.add(c1, c1, T20); region.put(p); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); region.flush(true); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); // Now try combo of memcache and mapfiles. Delete the t20 COLUMS[1] @@ -262,14 +263,14 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { d = new Delete(T20); d.deleteColumn(c1, c1); region.delete(d); - r = region.getClosestRowBefore(T30, c0); + r = UTIL.getClosestRowBefore(region, T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); // Ask for a value off the end of the file. Should return t10. - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); region.flush(true); - r = region.getClosestRowBefore(T31, c0); + r = UTIL.getClosestRowBefore(region, T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); // Ok. 
Let the candidate come out of hfile but have delete of @@ -279,7 +280,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { region.put(p); d = new Delete(T10); d.deleteColumn(c1, c1); - r = region.getClosestRowBefore(T12, c0); + r = UTIL.getClosestRowBefore(region, T12, c0); assertTrue(Bytes.equals(T11, r.getRow())); } finally { if (region != null) { @@ -316,13 +317,13 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { region.put(p); // try finding "035" - Result r = region.getClosestRowBefore(T35, c0); + Result r = UTIL.getClosestRowBefore(region, T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); region.flush(true); // try finding "035" - r = region.getClosestRowBefore(T35, c0); + r = UTIL.getClosestRowBefore(region, T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); p = new Put(T20); @@ -330,13 +331,13 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { region.put(p); // try finding "035" - r = region.getClosestRowBefore(T35, c0); + r = UTIL.getClosestRowBefore(region, T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); region.flush(true); // try finding "035" - r = region.getClosestRowBefore(T35, c0); + r = UTIL.getClosestRowBefore(region, T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); } finally { if (region != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java index 7f8a20b11fb..cd84eac790f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java @@ -89,20 +89,20 @@ public class TestMinVersions { // now make sure that getClosestBefore(...) get can // rows that would be expired without minVersion. 
// also make sure it gets the latest version - Result r = region.getClosestRowBefore(T1, c0); + Result r = hbu.getClosestRowBefore(region, T1, c0); checkResult(r, c0, T4); - r = region.getClosestRowBefore(T2, c0); + r = hbu.getClosestRowBefore(region, T2, c0); checkResult(r, c0, T4); // now flush/compact region.flush(true); region.compact(true); - r = region.getClosestRowBefore(T1, c0); + r = hbu.getClosestRowBefore(region, T1, c0); checkResult(r, c0, T4); - r = region.getClosestRowBefore(T2, c0); + r = hbu.getClosestRowBefore(region, T2, c0); checkResult(r, c0, T4); } finally { HBaseTestingUtility.closeRegionAndWAL(region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java index bd8cf64cf6c..8fd3f6707e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -879,16 +879,6 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); - // preGetClosestRowBefore - verifyAllowed(new AccessTestAction() { - @Override - public Object run() throws Exception { - ACCESS_CONTROLLER.preGetClosestRowBefore(ObserverContext.createAndPrepare(RCP_ENV, null), - TEST_ROW, TEST_FAMILY, new Result()); - return null; - } - }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); - // preGetOp verifyAllowed(new AccessTestAction() { @Override diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java index 7b0ca04f497..c96e1fd9f2a 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java @@ -1610,25 +1610,12 @@ public class ThriftServerRunner implements Runnable { } } - @Deprecated - @Override - public List getRowOrBefore(ByteBuffer tableName, ByteBuffer row, - ByteBuffer family) throws IOError { - try { - Result result = getRowOrBefore(getBytes(tableName), getBytes(row), getBytes(family)); - return ThriftUtilities.cellFromHBase(result.rawCells()); - } catch (IOException e) { - LOG.warn(e.getMessage(), e); - throw new IOError(Throwables.getStackTraceAsString(e)); - } - } - @Override public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError { try { byte[] row = getBytes(searchRow); - Result startRowResult = - getRowOrBefore(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY); + Result startRowResult = getReverseScanResult(TableName.META_TABLE_NAME.getName(), row, + HConstants.CATALOG_FAMILY); if (startRowResult == null) { throw new IOException("Cannot find row in "+ TableName.META_TABLE_NAME+", row=" @@ -1662,7 +1649,8 @@ public class ThriftServerRunner implements Runnable { } } - private Result getRowOrBefore(byte[] tableName, byte[] row, byte[] family) throws IOException { + private Result getReverseScanResult(byte[] tableName, byte[] row, byte[] family) + throws IOException { Scan scan = new Scan(row); scan.setReversed(true); scan.addFamily(family); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java index db48a622c9a..857f5f71388 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java @@ -564,6 +564,8 @@ public class Hbase { * * @throws IllegalArgument if ScannerID is invalid * + * @throws NotFound when the scanner reaches the end + * * @param id id of a scanner returned by scannerOpen */ public List scannerGet(int id) throws IOError, IllegalArgument, org.apache.thrift.TException; @@ -578,6 +580,8 @@ public class Hbase { * * @throws IllegalArgument if ScannerID is invalid * + * @throws NotFound when the scanner reaches the end + * * @param id id of a scanner returned by scannerOpen * * @param nbRows number of results to return @@ -593,19 +597,6 @@ public class Hbase { */ public void scannerClose(int id) throws IOError, IllegalArgument, org.apache.thrift.TException; - /** - * Get the row just before the specified one. - * - * @return value for specified row/column - * - * @param tableName name of table - * - * @param row row key - * - * @param family column name - */ - public List getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family) throws IOError, org.apache.thrift.TException; - /** * Get the regininfo for the specified row. It scans * the metatable to find region's start and end keys. @@ -733,8 +724,6 @@ public class Hbase { public void scannerClose(int id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void getRegionInfo(ByteBuffer row, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void append(TAppend append, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -1893,34 +1882,6 @@ public class Hbase { return; } - public List getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family) throws IOError, org.apache.thrift.TException - { - send_getRowOrBefore(tableName, row, family); - return recv_getRowOrBefore(); - } - - public void send_getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family) throws org.apache.thrift.TException - { - getRowOrBefore_args args = new getRowOrBefore_args(); - args.setTableName(tableName); - args.setRow(row); - args.setFamily(family); - sendBase("getRowOrBefore", args); - } - - public List recv_getRowOrBefore() throws IOError, org.apache.thrift.TException - { - getRowOrBefore_result result = new getRowOrBefore_result(); - receiveBase(result, "getRowOrBefore"); - if (result.isSetSuccess()) { - return result.success; - } - if (result.io != null) { - throw result.io; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRowOrBefore failed: unknown result"); - } - public TRegionInfo getRegionInfo(ByteBuffer row) throws IOError, org.apache.thrift.TException { send_getRegionInfo(row); @@ -3592,44 +3553,6 @@ public class Hbase { } } - public void getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - getRowOrBefore_call method_call = new getRowOrBefore_call(tableName, row, family, 
resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class getRowOrBefore_call extends org.apache.thrift.async.TAsyncMethodCall { - private ByteBuffer tableName; - private ByteBuffer row; - private ByteBuffer family; - public getRowOrBefore_call(ByteBuffer tableName, ByteBuffer row, ByteBuffer family, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.tableName = tableName; - this.row = row; - this.family = family; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRowOrBefore", org.apache.thrift.protocol.TMessageType.CALL, 0)); - getRowOrBefore_args args = new getRowOrBefore_args(); - args.setTableName(tableName); - args.setRow(row); - args.setFamily(family); - args.write(prot); - prot.writeMessageEnd(); - } - - public List getResult() throws IOError, org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_getRowOrBefore(); - } - } - public void getRegionInfo(ByteBuffer row, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); getRegionInfo_call method_call = new getRegionInfo_call(row, resultHandler, this, ___protocolFactory, ___transport); @@ -3795,7 +3718,6 @@ public class Hbase { processMap.put("scannerGet", new scannerGet()); processMap.put("scannerGetList", new scannerGetList()); processMap.put("scannerClose", new scannerClose()); - processMap.put("getRowOrBefore", new getRowOrBefore()); processMap.put("getRegionInfo", new getRegionInfo()); processMap.put("append", new append()); processMap.put("checkAndPut", new checkAndPut()); @@ -4814,30 +4736,6 @@ public class Hbase { } } - public static class getRowOrBefore extends org.apache.thrift.ProcessFunction { - public getRowOrBefore() { - super("getRowOrBefore"); - } - - public getRowOrBefore_args getEmptyArgsInstance() { - return new getRowOrBefore_args(); - } - - protected boolean isOneway() { - return false; - } - - public getRowOrBefore_result getResult(I iface, getRowOrBefore_args args) throws org.apache.thrift.TException { - getRowOrBefore_result result = new getRowOrBefore_result(); - try { - result.success = iface.getRowOrBefore(args.tableName, args.row, args.family); - } catch (IOError io) { - result.io = io; - } - return result; - } - } - public static class getRegionInfo extends org.apache.thrift.ProcessFunction { public getRegionInfo() { super("getRegionInfo"); @@ -52660,1165 +52558,6 @@ public class Hbase { } - public static class getRowOrBefore_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowOrBefore_args"); - - 
private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new getRowOrBefore_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getRowOrBefore_argsTupleSchemeFactory()); - } - - /** - * name of table - */ - public ByteBuffer tableName; // required - /** - * row key - */ - public ByteBuffer row; // required - /** - * column name - */ - public ByteBuffer family; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * name of table - */ - TABLE_NAME((short)1, "tableName"), - /** - * row key - */ - ROW((short)2, "row"), - /** - * column name - */ - FAMILY((short)3, "family"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // TABLE_NAME - return TABLE_NAME; - case 2: // ROW - return ROW; - case 3: // FAMILY - return FAMILY; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text"))); - tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text"))); - tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowOrBefore_args.class, metaDataMap); - } - - public getRowOrBefore_args() { - } - - public getRowOrBefore_args( - ByteBuffer tableName, - ByteBuffer row, - ByteBuffer family) - { - this(); - this.tableName = tableName; - this.row = row; - this.family = family; - } - - /** - * Performs a deep copy on other. - */ - public getRowOrBefore_args(getRowOrBefore_args other) { - if (other.isSetTableName()) { - this.tableName = other.tableName; - } - if (other.isSetRow()) { - this.row = other.row; - } - if (other.isSetFamily()) { - this.family = other.family; - } - } - - public getRowOrBefore_args deepCopy() { - return new getRowOrBefore_args(this); - } - - @Override - public void clear() { - this.tableName = null; - this.row = null; - this.family = null; - } - - /** - * name of table - */ - public byte[] getTableName() { - setTableName(org.apache.thrift.TBaseHelper.rightSize(tableName)); - return tableName == null ? null : tableName.array(); - } - - public ByteBuffer bufferForTableName() { - return tableName; - } - - /** - * name of table - */ - public getRowOrBefore_args setTableName(byte[] tableName) { - setTableName(tableName == null ? (ByteBuffer)null : ByteBuffer.wrap(tableName)); - return this; - } - - public getRowOrBefore_args setTableName(ByteBuffer tableName) { - this.tableName = tableName; - return this; - } - - public void unsetTableName() { - this.tableName = null; - } - - /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; - } - - public void setTableNameIsSet(boolean value) { - if (!value) { - this.tableName = null; - } - } - - /** - * row key - */ - public byte[] getRow() { - setRow(org.apache.thrift.TBaseHelper.rightSize(row)); - return row == null ? null : row.array(); - } - - public ByteBuffer bufferForRow() { - return row; - } - - /** - * row key - */ - public getRowOrBefore_args setRow(byte[] row) { - setRow(row == null ? 
(ByteBuffer)null : ByteBuffer.wrap(row)); - return this; - } - - public getRowOrBefore_args setRow(ByteBuffer row) { - this.row = row; - return this; - } - - public void unsetRow() { - this.row = null; - } - - /** Returns true if field row is set (has been assigned a value) and false otherwise */ - public boolean isSetRow() { - return this.row != null; - } - - public void setRowIsSet(boolean value) { - if (!value) { - this.row = null; - } - } - - /** - * column name - */ - public byte[] getFamily() { - setFamily(org.apache.thrift.TBaseHelper.rightSize(family)); - return family == null ? null : family.array(); - } - - public ByteBuffer bufferForFamily() { - return family; - } - - /** - * column name - */ - public getRowOrBefore_args setFamily(byte[] family) { - setFamily(family == null ? (ByteBuffer)null : ByteBuffer.wrap(family)); - return this; - } - - public getRowOrBefore_args setFamily(ByteBuffer family) { - this.family = family; - return this; - } - - public void unsetFamily() { - this.family = null; - } - - /** Returns true if field family is set (has been assigned a value) and false otherwise */ - public boolean isSetFamily() { - return this.family != null; - } - - public void setFamilyIsSet(boolean value) { - if (!value) { - this.family = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case TABLE_NAME: - if (value == null) { - unsetTableName(); - } else { - setTableName((ByteBuffer)value); - } - break; - - case ROW: - if (value == null) { - unsetRow(); - } else { - setRow((ByteBuffer)value); - } - break; - - case FAMILY: - if (value == null) { - unsetFamily(); - } else { - setFamily((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case TABLE_NAME: - return getTableName(); - - case ROW: - return getRow(); - - case FAMILY: - return getFamily(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case TABLE_NAME: - return isSetTableName(); - case ROW: - return isSetRow(); - case FAMILY: - return isSetFamily(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof getRowOrBefore_args) - return this.equals((getRowOrBefore_args)that); - return false; - } - - public boolean equals(getRowOrBefore_args that) { - if (that == null) - return false; - - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) - return false; - if (!this.tableName.equals(that.tableName)) - return false; - } - - boolean this_present_row = true && this.isSetRow(); - boolean that_present_row = true && that.isSetRow(); - if (this_present_row || that_present_row) { - if (!(this_present_row && that_present_row)) - return false; - if (!this.row.equals(that.row)) - return false; - } - - boolean this_present_family = true && this.isSetFamily(); - boolean that_present_family = true && that.isSetFamily(); - if (this_present_family || that_present_family) { - if (!(this_present_family && that_present_family)) - return false; - if (!this.family.equals(that.family)) - return false; - } - - return 
true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_tableName = true && (isSetTableName()); - builder.append(present_tableName); - if (present_tableName) - builder.append(tableName); - - boolean present_row = true && (isSetRow()); - builder.append(present_row); - if (present_row) - builder.append(row); - - boolean present_family = true && (isSetFamily()); - builder.append(present_family); - if (present_family) - builder.append(family); - - return builder.toHashCode(); - } - - public int compareTo(getRowOrBefore_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - getRowOrBefore_args typedOther = (getRowOrBefore_args)other; - - lastComparison = Boolean.valueOf(isSetTableName()).compareTo(typedOther.isSetTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, typedOther.tableName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetRow()).compareTo(typedOther.isSetRow()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRow()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row, typedOther.row); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetFamily()).compareTo(typedOther.isSetFamily()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFamily()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.family, typedOther.family); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("getRowOrBefore_args("); - boolean first = true; - - sb.append("tableName:"); - if (this.tableName == null) { - sb.append("null"); - } else { - sb.append(this.tableName); - } - first = false; - if (!first) sb.append(", "); - sb.append("row:"); - if (this.row == null) { - sb.append("null"); - } else { - sb.append(this.row); - } - first = false; - if (!first) sb.append(", "); - sb.append("family:"); - if (this.family == null) { - sb.append("null"); - } else { - sb.append(this.family); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch 
(org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class getRowOrBefore_argsStandardSchemeFactory implements SchemeFactory { - public getRowOrBefore_argsStandardScheme getScheme() { - return new getRowOrBefore_argsStandardScheme(); - } - } - - private static class getRowOrBefore_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, getRowOrBefore_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tableName = iprot.readBinary(); - struct.setTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // ROW - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.row = iprot.readBinary(); - struct.setRowIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // FAMILY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.family = iprot.readBinary(); - struct.setFamilyIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - - // check for required fields of primitive type, which can't be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, getRowOrBefore_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.tableName != null) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeBinary(struct.tableName); - oprot.writeFieldEnd(); - } - if (struct.row != null) { - oprot.writeFieldBegin(ROW_FIELD_DESC); - oprot.writeBinary(struct.row); - oprot.writeFieldEnd(); - } - if (struct.family != null) { - oprot.writeFieldBegin(FAMILY_FIELD_DESC); - oprot.writeBinary(struct.family); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class getRowOrBefore_argsTupleSchemeFactory implements SchemeFactory { - public getRowOrBefore_argsTupleScheme getScheme() { - return new getRowOrBefore_argsTupleScheme(); - } - } - - private static class getRowOrBefore_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getRowOrBefore_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetTableName()) { - optionals.set(0); - } - if (struct.isSetRow()) { - optionals.set(1); - } - if (struct.isSetFamily()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetTableName()) { - oprot.writeBinary(struct.tableName); - } - if (struct.isSetRow()) { - oprot.writeBinary(struct.row); - } - if (struct.isSetFamily()) { - oprot.writeBinary(struct.family); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getRowOrBefore_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = 
(TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); - if (incoming.get(0)) { - struct.tableName = iprot.readBinary(); - struct.setTableNameIsSet(true); - } - if (incoming.get(1)) { - struct.row = iprot.readBinary(); - struct.setRowIsSet(true); - } - if (incoming.get(2)) { - struct.family = iprot.readBinary(); - struct.setFamilyIsSet(true); - } - } - } - - } - - public static class getRowOrBefore_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowOrBefore_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); - private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new getRowOrBefore_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getRowOrBefore_resultTupleSchemeFactory()); - } - - public List success; // required - public IOError io; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), - IO((short)1, "io"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - case 1: // IO - return IO; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class)))); - tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowOrBefore_result.class, metaDataMap); - } - - public getRowOrBefore_result() { - } - - public getRowOrBefore_result( - List success, - IOError io) - { - this(); - this.success = success; - this.io = io; - } - - /** - * Performs a deep copy on other. - */ - public getRowOrBefore_result(getRowOrBefore_result other) { - if (other.isSetSuccess()) { - List __this__success = new ArrayList(); - for (TCell other_element : other.success) { - __this__success.add(new TCell(other_element)); - } - this.success = __this__success; - } - if (other.isSetIo()) { - this.io = new IOError(other.io); - } - } - - public getRowOrBefore_result deepCopy() { - return new getRowOrBefore_result(this); - } - - @Override - public void clear() { - this.success = null; - this.io = null; - } - - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? 
null : this.success.iterator(); - } - - public void addToSuccess(TCell elem) { - if (this.success == null) { - this.success = new ArrayList(); - } - this.success.add(elem); - } - - public List getSuccess() { - return this.success; - } - - public getRowOrBefore_result setSuccess(List success) { - this.success = success; - return this; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public IOError getIo() { - return this.io; - } - - public getRowOrBefore_result setIo(IOError io) { - this.io = io; - return this; - } - - public void unsetIo() { - this.io = null; - } - - /** Returns true if field io is set (has been assigned a value) and false otherwise */ - public boolean isSetIo() { - return this.io != null; - } - - public void setIoIsSet(boolean value) { - if (!value) { - this.io = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((List)value); - } - break; - - case IO: - if (value == null) { - unsetIo(); - } else { - setIo((IOError)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - case IO: - return getIo(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - case IO: - return isSetIo(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof getRowOrBefore_result) - return this.equals((getRowOrBefore_result)that); - return false; - } - - public boolean equals(getRowOrBefore_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - boolean this_present_io = true && this.isSetIo(); - boolean that_present_io = true && that.isSetIo(); - if (this_present_io || that_present_io) { - if (!(this_present_io && that_present_io)) - return false; - if (!this.io.equals(that.io)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - boolean present_io = true && (isSetIo()); - builder.append(present_io); - if (present_io) - builder.append(io); - - return builder.toHashCode(); - } - - public int compareTo(getRowOrBefore_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - getRowOrBefore_result typedOther = (getRowOrBefore_result)other; - - lastComparison = 
Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetIo()).compareTo(typedOther.isSetIo()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetIo()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.io, typedOther.io); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("getRowOrBefore_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - if (!first) sb.append(", "); - sb.append("io:"); - if (this.io == null) { - sb.append("null"); - } else { - sb.append(this.io); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class getRowOrBefore_resultStandardSchemeFactory implements SchemeFactory { - public getRowOrBefore_resultStandardScheme getScheme() { - return new getRowOrBefore_resultStandardScheme(); - } - } - - private static class getRowOrBefore_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, getRowOrBefore_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list582 = iprot.readListBegin(); - struct.success = new ArrayList(_list582.size); - for (int _i583 = 0; _i583 < _list582.size; ++_i583) - { - TCell _elem584; // required - _elem584 = new TCell(); - _elem584.read(iprot); - struct.success.add(_elem584); - } - iprot.readListEnd(); - } - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 1: // IO - if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { - struct.io = new IOError(); - struct.io.read(iprot); - struct.setIoIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - - // check for required fields of primitive type, which can't be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, getRowOrBefore_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TCell _iter585 : struct.success) - { - _iter585.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.io != null) { - oprot.writeFieldBegin(IO_FIELD_DESC); - struct.io.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class getRowOrBefore_resultTupleSchemeFactory implements SchemeFactory { - public getRowOrBefore_resultTupleScheme getScheme() { - return new getRowOrBefore_resultTupleScheme(); - } - } - - private static class getRowOrBefore_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getRowOrBefore_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - if (struct.isSetIo()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (TCell _iter586 : struct.success) - { - _iter586.write(oprot); - } - } - } - if (struct.isSetIo()) { - struct.io.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getRowOrBefore_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list587.size); - for (int _i588 = 0; _i588 < _list587.size; ++_i588) - { - TCell _elem589; // required - _elem589 = new TCell(); - _elem589.read(iprot); - struct.success.add(_elem589); - } - } - struct.setSuccessIsSet(true); - } - if (incoming.get(1)) { - struct.io = new IOError(); - struct.io.read(iprot); - struct.setIoIsSet(true); - } - } - } - - } - public static class getRegionInfo_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRegionInfo_args"); @@ -55451,14 +54190,14 @@ public class Hbase { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list590 = iprot.readListBegin(); - struct.success = new ArrayList(_list590.size); - for (int _i591 = 0; _i591 < _list590.size; ++_i591) + org.apache.thrift.protocol.TList _list582 = iprot.readListBegin(); + struct.success = new ArrayList(_list582.size); + for (int _i583 = 0; 
_i583 < _list582.size; ++_i583) { - TCell _elem592; // required - _elem592 = new TCell(); - _elem592.read(iprot); - struct.success.add(_elem592); + TCell _elem584; // required + _elem584 = new TCell(); + _elem584.read(iprot); + struct.success.add(_elem584); } iprot.readListEnd(); } @@ -55495,9 +54234,9 @@ public class Hbase { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TCell _iter593 : struct.success) + for (TCell _iter585 : struct.success) { - _iter593.write(oprot); + _iter585.write(oprot); } oprot.writeListEnd(); } @@ -55536,9 +54275,9 @@ public class Hbase { if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TCell _iter594 : struct.success) + for (TCell _iter586 : struct.success) { - _iter594.write(oprot); + _iter586.write(oprot); } } } @@ -55553,14 +54292,14 @@ public class Hbase { BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list595.size); - for (int _i596 = 0; _i596 < _list595.size; ++_i596) + org.apache.thrift.protocol.TList _list587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list587.size); + for (int _i588 = 0; _i588 < _list587.size; ++_i588) { - TCell _elem597; // required - _elem597 = new TCell(); - _elem597.read(iprot); - struct.success.add(_elem597); + TCell _elem589; // required + _elem589 = new TCell(); + _elem589.read(iprot); + struct.success.add(_elem589); } } struct.setSuccessIsSet(true); @@ -56470,15 +55209,15 @@ public class Hbase { case 7: // ATTRIBUTES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map598 = iprot.readMapBegin(); - struct.attributes = new HashMap(2*_map598.size); - for (int _i599 = 0; _i599 < _map598.size; ++_i599) + org.apache.thrift.protocol.TMap _map590 = iprot.readMapBegin(); + struct.attributes = new HashMap(2*_map590.size); + for (int _i591 = 0; _i591 < _map590.size; ++_i591) { - ByteBuffer _key600; // required - ByteBuffer _val601; // required - _key600 = iprot.readBinary(); - _val601 = iprot.readBinary(); - struct.attributes.put(_key600, _val601); + ByteBuffer _key592; // required + ByteBuffer _val593; // required + _key592 = iprot.readBinary(); + _val593 = iprot.readBinary(); + struct.attributes.put(_key592, _val593); } iprot.readMapEnd(); } @@ -56531,10 +55270,10 @@ public class Hbase { oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size())); - for (Map.Entry _iter602 : struct.attributes.entrySet()) + for (Map.Entry _iter594 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter602.getKey()); - oprot.writeBinary(_iter602.getValue()); + oprot.writeBinary(_iter594.getKey()); + oprot.writeBinary(_iter594.getValue()); } oprot.writeMapEnd(); } @@ -56595,10 +55334,10 @@ public class Hbase { if (struct.isSetAttributes()) { { oprot.writeI32(struct.attributes.size()); - for (Map.Entry _iter603 : struct.attributes.entrySet()) + for (Map.Entry _iter595 : struct.attributes.entrySet()) { - oprot.writeBinary(_iter603.getKey()); - oprot.writeBinary(_iter603.getValue()); + oprot.writeBinary(_iter595.getKey()); + 
              oprot.writeBinary(_iter595.getValue());
            }
          }
        }
@@ -56631,15 +55370,15 @@ public class Hbase {
         }
         if (incoming.get(5)) {
           {
-            org.apache.thrift.protocol.TMap _map604 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.attributes = new HashMap(2*_map604.size);
-            for (int _i605 = 0; _i605 < _map604.size; ++_i605)
+            org.apache.thrift.protocol.TMap _map596 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.attributes = new HashMap(2*_map596.size);
+            for (int _i597 = 0; _i597 < _map596.size; ++_i597)
             {
-              ByteBuffer _key606; // required
-              ByteBuffer _val607; // required
-              _key606 = iprot.readBinary();
-              _val607 = iprot.readBinary();
-              struct.attributes.put(_key606, _val607);
+              ByteBuffer _key598; // required
+              ByteBuffer _val599; // required
+              _key598 = iprot.readBinary();
+              _val599 = iprot.readBinary();
+              struct.attributes.put(_key598, _val599);
             }
           }
           struct.setAttributesIsSet(true);
diff --git a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift
index f4e48ff0d59..9cab7ff6ef0 100644
--- a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift
+++ b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift
@@ -906,22 +906,6 @@ service Hbase {
     1:ScannerID id
   ) throws (1:IOError io, 2:IllegalArgument ia)

-  /**
-   * Get the row just before the specified one.
-   *
-   * @return value for specified row/column
-   */
-  list<TCell> getRowOrBefore(
-    /** name of table */
-    1:Text tableName,
-
-    /** row key */
-    2:Text row,
-
-    /** column name */
-    3:Text family
-  ) throws (1:IOError io)
-
   /**
    * Get the regininfo for the specified row. It scans
    * the metatable to find region's start and end keys.
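
With getRowOrBefore removed from the Thrift IDL above, gateway clients that relied on it need a caller-side replacement. The sketch below shows one way to approximate the old semantics with the HBase Java client: a reversed scan started at the requested key, limited to a single row. This is a minimal sketch, not part of the patch; it assumes the HBase 2.x client API (Scan.withStartRow/setLimit), and the helper name rowOrBefore plus any table and family names are illustrative placeholders.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class RowOrBeforeExample {

  /**
   * Approximates the removed getRowOrBefore call: returns the row whose key
   * equals the given key or, failing that, the closest key before it, limited
   * to one column family. Returns null if no such row exists.
   */
  static Result rowOrBefore(Connection conn, TableName table, byte[] row, byte[] family)
      throws IOException {
    Scan scan = new Scan()
        .withStartRow(row, true)  // start at the requested key, inclusive
        .setReversed(true)        // walk toward earlier keys
        .setLimit(1)              // a single row is all we need
        .addFamily(family);
    try (Table t = conn.getTable(table);
         ResultScanner scanner = t.getScanner(scan)) {
      return scanner.next();      // first row of the reversed scan, or null if none
    }
  }
}

A call such as rowOrBefore(conn, TableName.valueOf("t1"), Bytes.toBytes("row-0100"), Bytes.toBytes("f")) (with org.apache.hadoop.hbase.util.Bytes; names are placeholders) then stands in for the removed Thrift method. Limiting the reversed scan to one row keeps its cost close to a point Get, which is the idea behind replacing this API with a reverse scan.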