From 2f1923a8233b0c999494cd4b33f85b70dc5d7b12 Mon Sep 17 00:00:00 2001 From: Phil Yang Date: Thu, 25 May 2017 15:18:58 +0800 Subject: [PATCH] HBASE-15576 Scanning cursor to prevent blocking long time on ResultScanner.next() --- .../hadoop/hbase/client/ClientScanner.java | 15 + .../hadoop/hbase/client/ClientUtil.java | 4 + .../apache/hadoop/hbase/client/Cursor.java | 41 + .../apache/hadoop/hbase/client/Result.java | 45 + .../org/apache/hadoop/hbase/client/Scan.java | 43 + .../hadoop/hbase/client/ScannerCallable.java | 14 +- .../client/ScannerCallableWithReplicas.java | 4 + .../hbase/shaded/protobuf/ProtobufUtil.java | 24 + .../protobuf/generated/ClientProtos.java | 1086 +++++++++++++++-- .../src/main/protobuf/Client.proto | 14 +- .../protobuf/generated/ClientProtos.java | 1058 ++++++++++++++-- hbase-protocol/src/main/protobuf/Client.proto | 14 +- .../hbase/regionserver/RSRpcServices.java | 18 +- .../hbase/regionserver/ScannerContext.java | 13 + .../hbase/regionserver/StoreScanner.java | 1 + .../hbase/regionserver/TestScannerCursor.java | 191 +++ 16 files changed, 2344 insertions(+), 241 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerCursor.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index fa5f8689e37..59cf005cd7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -499,6 +499,21 @@ public abstract class ClientScanner extends AbstractClientScanner { break; } } + if (cache.isEmpty() && !closed && scan.isNeedCursorResult()) { + if (callable.isHeartbeatMessage() && callable.getCursor() != null) { + // Use cursor row key from server + cache.add(Result.createCursorResult(callable.getCursor())); + break; + } + if (values.length > 0) { + // It is size limit exceed and we need return the last Result's row. + // When user setBatch and the scanner is reopened, the server may return Results that + // user has seen and the last Result can not be seen because the number is not enough. + // So the row keys of results may not be same, we must use the last one. + cache.add(Result.createCursorResult(new Cursor(values[values.length - 1].getRow()))); + break; + } + } if (countdown <= 0) { // we have enough result. 
closeScannerIfExhausted(regionExhausted); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java index e4a84d5c538..a83908030a2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java @@ -27,4 +27,8 @@ public class ClientUtil { public static boolean areScanStartRowAndStopRowEqual(byte[] startRow, byte[] stopRow) { return startRow != null && startRow.length > 0 && Bytes.equals(startRow, stopRow); } + + public static Cursor createCursor(byte[] row) { + return new Cursor(row); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java new file mode 100644 index 00000000000..1d4b4b5c00a --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * Scan cursor to tell client where server is scanning + * {@link Scan#setNeedCursorResult(boolean)} + * {@link Result#isCursor()} + * {@link Result#getCursor()} + */ +@InterfaceAudience.Public +public class Cursor { + + private final byte[] row; + + Cursor(byte[] row) { + this.row = row; + } + + public byte[] getRow() { + return row; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index 63aab807a26..94e1b908aac 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -108,6 +108,8 @@ public class Result implements CellScannable, CellScanner { private final boolean readonly; + private Cursor cursor = null; + /** * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #rawCells()}. * Use this to represent no results if {@code null} won't do or in old 'mapred' as opposed @@ -173,6 +175,15 @@ public class Result implements CellScannable, CellScanner { return new Result(cells, null, stale, mayHaveMoreCellsInRow); } + public static Result createCursorResult(Cursor cursor) { + return new Result(cursor); + } + + private Result(Cursor cursor) { + this.cursor = cursor; + this.readonly = false; + } + /** Private ctor. Use {@link #create(Cell[])}. 
*/ private Result(Cell[] cells, Boolean exists, boolean stale, boolean mayHaveMoreCellsInRow) { this.cells = cells; @@ -948,4 +959,38 @@ public class Result implements CellScannable, CellScanner { throw new UnsupportedOperationException("Attempting to modify readonly EMPTY_RESULT!"); } } + + /** + * Return true if this Result is a cursor to tell users where the server has scanned. + * In this Result the only meaningful method is {@link #getCursor()}. + * + * {@code + * while (r = scanner.next() && r != null) { + * if(r.isCursor()){ + * // scanning is not end, it is a cursor, save its row key and close scanner if you want, or + * // just continue the loop to call next(). + * } else { + * // just like before + * } + * } + * // scanning is end + * + * } + * {@link Scan#setNeedCursorResult(boolean)} + * {@link Cursor} + * {@link #getCursor()} + */ + public boolean isCursor() { + return cursor != null ; + } + + /** + * Return the cursor if this Result is a cursor result. + * {@link Scan#setNeedCursorResult(boolean)} + * {@link Cursor} + * {@link #isCursor()} + */ + public Cursor getCursor(){ + return cursor; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index 2746263b650..639f43e4e87 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -185,6 +185,9 @@ public class Scan extends Query { * Control whether to use pread at server side. */ private ReadType readType = ReadType.DEFAULT; + + private boolean needCursorResult = false; + /** * Create a Scan operation across all rows. */ @@ -272,6 +275,7 @@ public class Scan extends Query { } this.mvccReadPoint = scan.getMvccReadPoint(); this.limit = scan.getLimit(); + this.needCursorResult = scan.isNeedCursorResult(); } /** @@ -1170,4 +1174,43 @@ public class Scan extends Query { Scan resetMvccReadPoint() { return setMvccReadPoint(-1L); } + + /** + * When the server is slow or we scan a table with many deleted data or we use a sparse filter, + * the server will response heartbeat to prevent timeout. However the scanner will return a Result + * only when client can do it. So if there are many heartbeats, the blocking time on + * ResultScanner#next() may be very long, which is not friendly to online services. + * + * Set this to true then you can get a special Result whose #isCursor() returns true and is not + * contains any real data. It only tells you where the server has scanned. You can call next + * to continue scanning or open a new scanner with this row key as start row whenever you want. + * + * Users can get a cursor when and only when there is a response from the server but we can not + * return a Result to users, for example, this response is a heartbeat or there are partial cells + * but users do not allow partial result. + * + * Now the cursor is in row level which means the special Result will only contains a row key. + * {@link Result#isCursor()} + * {@link Result#getCursor()} + * {@link Cursor} + */ + public Scan setNeedCursorResult(boolean needCursorResult) { + this.needCursorResult = needCursorResult; + return this; + } + + public boolean isNeedCursorResult() { + return needCursorResult; + } + + /** + * Create a new Scan with a cursor. It only set the position information like start row key. + * The others (like cfs, stop row, limit) should still be filled in by the user. 
+ * {@link Result#isCursor()} + * {@link Result#getCursor()} + * {@link Cursor} + */ + public static Scan createScanFromCursor(Cursor cursor) { + return new Scan().withStartRow(cursor.getRow()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index ffac566e23e..4227e41eacb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -87,6 +87,8 @@ public class ScannerCallable extends ClientServiceCallable { */ protected boolean heartbeatMessage = false; + protected Cursor cursor; + // indicate if it is a remote server call protected boolean isRegionServerRemote = true; private long nextCallSeq = 0; @@ -148,7 +150,7 @@ public class ScannerCallable extends ClientServiceCallable { checkIfRegionServerIsRemote(); instantiated = true; } - + cursor = null; // check how often we retry. if (reload) { incRPCRetriesMetrics(scanMetrics, isRegionServerRemote); @@ -242,7 +244,11 @@ public class ScannerCallable extends ClientServiceCallable { response = next(); } long timestamp = System.currentTimeMillis(); - setHeartbeatMessage(response.hasHeartbeatMessage() && response.getHeartbeatMessage()); + boolean isHeartBeat = response.hasHeartbeatMessage() && response.getHeartbeatMessage(); + setHeartbeatMessage(isHeartBeat); + if (isHeartBeat && scan.isNeedCursorResult() && response.hasCursor()) { + cursor = ProtobufUtil.toCursor(response.getCursor()); + } Result[] rrs = ResponseConverter.getResults(getRpcControllerCellScanner(), response); if (logScannerActivity) { long now = System.currentTimeMillis(); @@ -288,6 +294,10 @@ public class ScannerCallable extends ClientServiceCallable { return heartbeatMessage; } + public Cursor getCursor() { + return cursor; + } + private void setHeartbeatMessage(boolean heartbeatMessage) { this.heartbeatMessage = heartbeatMessage; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java index bcd5d21d912..0cdd4ddbef4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java @@ -302,6 +302,10 @@ class ScannerCallableWithReplicas implements RetryingCallable { return currentScannerCallable != null && currentScannerCallable.isHeartbeatMessage(); } + public Cursor getCursor() { + return currentScannerCallable != null ? 
currentScannerCallable.getCursor() : null; + } + private void addCallsForCurrentReplica( ResultBoundedCompletionService> cs, RegionLocations rl) { RetryingRPC retryingOnReplica = new RetryingRPC(currentScannerCallable); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 8a4e4124307..5c4dd550219 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ClientUtil; import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Consistency; +import org.apache.hadoop.hbase.client.Cursor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -185,6 +186,7 @@ import org.apache.hadoop.hbase.util.Methods; import org.apache.hadoop.hbase.util.NonceKey; import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.mapreduce.tools.CLI; /** * Protobufs utility. @@ -1099,6 +1101,9 @@ public final class ProtobufUtil { if (scan.getReadType() != Scan.ReadType.DEFAULT) { scanBuilder.setReadType(toReadType(scan.getReadType())); } + if (scan.isNeedCursorResult()) { + scanBuilder.setNeedCursorResult(true); + } return scanBuilder.build(); } @@ -1207,9 +1212,28 @@ public final class ProtobufUtil { } else if (proto.hasReadType()) { scan.setReadType(toReadType(proto.getReadType())); } + if (proto.getNeedCursorResult()) { + scan.setNeedCursorResult(true); + } return scan; } + public static ClientProtos.Cursor toCursor(Cursor cursor) { + ClientProtos.Cursor.Builder builder = ClientProtos.Cursor.newBuilder(); + ClientProtos.Cursor.newBuilder().setRow(ByteString.copyFrom(cursor.getRow())); + return builder.build(); + } + + public static ClientProtos.Cursor toCursor(Cell cell) { + return ClientProtos.Cursor.newBuilder() + .setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + .build(); + } + + public static Cursor toCursor(ClientProtos.Cursor cursor) { + return ClientUtil.createCursor(cursor.getRow().toByteArray()); + } + /** * Create a protocol buffer Get based on a client Get. * diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java index a5f81e62731..b93f6cc7e04 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java @@ -14598,6 +14598,15 @@ public final class ClientProtos { * optional .hbase.pb.Scan.ReadType readType = 23 [default = DEFAULT]; */ org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Scan.ReadType getReadType(); + + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + boolean hasNeedCursorResult(); + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + boolean getNeedCursorResult(); } /** *
@@ -14642,6 +14651,7 @@ public final class ClientProtos {
       includeStartRow_ = true;
       includeStopRow_ = false;
       readType_ = 0;
+      needCursorResult_ = false;
     }
 
     @java.lang.Override
@@ -14827,6 +14837,11 @@ public final class ClientProtos {
               }
               break;
             }
+            case 192: {
+              bitField0_ |= 0x00100000;
+              needCursorResult_ = input.readBool();
+              break;
+            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -15387,6 +15402,21 @@ public final class ClientProtos {
       return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Scan.ReadType.DEFAULT : result;
     }
 
+    public static final int NEED_CURSOR_RESULT_FIELD_NUMBER = 24;
+    private boolean needCursorResult_;
+    /**
+     * optional bool need_cursor_result = 24 [default = false];
+     */
+    public boolean hasNeedCursorResult() {
+      return ((bitField0_ & 0x00100000) == 0x00100000);
+    }
+    /**
+     * optional bool need_cursor_result = 24 [default = false];
+     */
+    public boolean getNeedCursorResult() {
+      return needCursorResult_;
+    }
+
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
@@ -15492,6 +15522,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00080000) == 0x00080000)) {
         output.writeEnum(23, readType_);
       }
+      if (((bitField0_ & 0x00100000) == 0x00100000)) {
+        output.writeBool(24, needCursorResult_);
+      }
       unknownFields.writeTo(output);
     }
 
@@ -15592,6 +15625,10 @@ public final class ClientProtos {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeEnumSize(23, readType_);
       }
+      if (((bitField0_ & 0x00100000) == 0x00100000)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeBoolSize(24, needCursorResult_);
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -15713,6 +15750,11 @@ public final class ClientProtos {
       if (hasReadType()) {
         result = result && readType_ == other.readType_;
       }
+      result = result && (hasNeedCursorResult() == other.hasNeedCursorResult());
+      if (hasNeedCursorResult()) {
+        result = result && (getNeedCursorResult()
+            == other.getNeedCursorResult());
+      }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
@@ -15825,6 +15867,11 @@ public final class ClientProtos {
         hash = (37 * hash) + READTYPE_FIELD_NUMBER;
         hash = (53 * hash) + readType_;
       }
+      if (hasNeedCursorResult()) {
+        hash = (37 * hash) + NEED_CURSOR_RESULT_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
+            getNeedCursorResult());
+      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -16024,6 +16071,8 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00200000);
         readType_ = 0;
         bitField0_ = (bitField0_ & ~0x00400000);
+        needCursorResult_ = false;
+        bitField0_ = (bitField0_ & ~0x00800000);
         return this;
       }
 
@@ -16163,6 +16212,10 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00080000;
         }
         result.readType_ = readType_;
+        if (((from_bitField0_ & 0x00800000) == 0x00800000)) {
+          to_bitField0_ |= 0x00100000;
+        }
+        result.needCursorResult_ = needCursorResult_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -16343,6 +16396,9 @@ public final class ClientProtos {
         if (other.hasReadType()) {
           setReadType(other.getReadType());
         }
+        if (other.hasNeedCursorResult()) {
+          setNeedCursorResult(other.getNeedCursorResult());
+        }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
         return this;
@@ -17952,6 +18008,38 @@ public final class ClientProtos {
         onChanged();
         return this;
       }
+
+      private boolean needCursorResult_ ;
+      /**
+       * optional bool need_cursor_result = 24 [default = false];
+       */
+      public boolean hasNeedCursorResult() {
+        return ((bitField0_ & 0x00800000) == 0x00800000);
+      }
+      /**
+       * optional bool need_cursor_result = 24 [default = false];
+       */
+      public boolean getNeedCursorResult() {
+        return needCursorResult_;
+      }
+      /**
+       * optional bool need_cursor_result = 24 [default = false];
+       */
+      public Builder setNeedCursorResult(boolean value) {
+        bitField0_ |= 0x00800000;
+        needCursorResult_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional bool need_cursor_result = 24 [default = false];
+       */
+      public Builder clearNeedCursorResult() {
+        bitField0_ = (bitField0_ & ~0x00800000);
+        needCursorResult_ = false;
+        onChanged();
+        return this;
+      }
       public final Builder setUnknownFields(
           final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
         return super.setUnknownFields(unknownFields);
@@ -19644,6 +19732,485 @@ public final class ClientProtos {
 
   }
 
+  public interface CursorOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.Cursor)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * optional bytes row = 1;
+     */
+    boolean hasRow();
+    /**
+     * optional bytes row = 1;
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getRow();
+  }
+  /**
+   * 
+   **
+   * Scan cursor to tell client where we are scanning.
+   * 
+ * + * Protobuf type {@code hbase.pb.Cursor} + */ + public static final class Cursor extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.Cursor) + CursorOrBuilder { + // Use Cursor.newBuilder() to construct. + private Cursor(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Cursor() { + row_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Cursor( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + row_ = input.readBytes(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder.class); + } + + private int bitField0_; + public static final int ROW_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString row_; + /** + * optional bytes row = 1; + */ + public boolean hasRow() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bytes row = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getRow() { + return row_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, row_); + } + 
unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBytesSize(1, row_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor) obj; + + boolean result = true; + result = result && (hasRow() == other.hasRow()); + if (hasRow()) { + result = result && getRow() + .equals(other.getRow()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRow()) { + hash = (37 * hash) + ROW_FIELD_NUMBER; + hash = (53 * hash) + getRow().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor 
parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     **
+     * Scan cursor to tell client where we are scanning.
+     * 
+ * + * Protobuf type {@code hbase.pb.Cursor} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.Cursor) + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + row_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.row_ = row_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) 
super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.getDefaultInstance()) return this; + if (other.hasRow()) { + setRow(other.getRow()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString row_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes row = 1; + */ + public boolean hasRow() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bytes row = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getRow() { + return row_; + } + /** + * optional bytes row = 1; + */ + public Builder setRow(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + row_ = value; + onChanged(); + return this; + } + /** + * optional bytes row = 1; + */ + public Builder clearRow() { + bitField0_ = (bitField0_ & ~0x00000001); + row_ = getDefaultInstance().getRow(); + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.Cursor) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.Cursor) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public Cursor parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new Cursor(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + public interface ScanResponseOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.ScanResponse) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -19919,6 +20486,34 @@ public final class ClientProtos { * optional uint64 mvcc_read_point = 11 [default = 0]; */ long getMvccReadPoint(); + + /** + *
+     * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+     * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+     * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + boolean hasCursor(); + /** + *
+     * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+     * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+     * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor getCursor(); + /** + *
+     * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+     * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+     * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder getCursorOrBuilder(); } /** *
@@ -20077,6 +20672,19 @@ public final class ClientProtos {
               mvccReadPoint_ = input.readUInt64();
               break;
             }
+            case 98: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000100) == 0x00000100)) {
+                subBuilder = cursor_.toBuilder();
+              }
+              cursor_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(cursor_);
+                cursor_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000100;
+              break;
+            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -20462,6 +21070,42 @@ public final class ClientProtos {
       return mvccReadPoint_;
     }
 
+    public static final int CURSOR_FIELD_NUMBER = 12;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor cursor_;
+    /**
+     * 
+     * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+     * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+     * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public boolean hasCursor() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + *
+     * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+     * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+     * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor getCursor() { + return cursor_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.getDefaultInstance() : cursor_; + } + /** + *
+     * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+     * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+     * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder getCursorOrBuilder() { + return cursor_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.getDefaultInstance() : cursor_; + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; @@ -20507,6 +21151,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeUInt64(11, mvccReadPoint_); } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(12, getCursor()); + } unknownFields.writeTo(output); } @@ -20566,6 +21213,10 @@ public final class ClientProtos { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream .computeUInt64Size(11, mvccReadPoint_); } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(12, getCursor()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -20629,6 +21280,11 @@ public final class ClientProtos { result = result && (getMvccReadPoint() == other.getMvccReadPoint()); } + result = result && (hasCursor() == other.hasCursor()); + if (hasCursor()) { + result = result && getCursor() + .equals(other.getCursor()); + } result = result && unknownFields.equals(other.unknownFields); return result; } @@ -20690,6 +21346,10 @@ public final class ClientProtos { hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( getMvccReadPoint()); } + if (hasCursor()) { + hash = (37 * hash) + CURSOR_FIELD_NUMBER; + hash = (53 * hash) + getCursor().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -20812,6 +21472,7 @@ public final class ClientProtos { .alwaysUseFieldBuilders) { getResultsFieldBuilder(); getScanMetricsFieldBuilder(); + getCursorFieldBuilder(); } } public Builder clear() { @@ -20846,6 +21507,12 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000200); mvccReadPoint_ = 0L; bitField0_ = (bitField0_ & ~0x00000400); + if (cursorBuilder_ == null) { + cursor_ = null; + } else { + cursorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000800); return this; } @@ -20925,6 +21592,14 @@ public final class ClientProtos { to_bitField0_ |= 0x00000080; } result.mvccReadPoint_ = mvccReadPoint_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000100; + } + if (cursorBuilder_ == null) { + result.cursor_ = cursor_; + } else { + result.cursor_ = cursorBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -21037,6 +21712,9 @@ public final class ClientProtos { if (other.hasMvccReadPoint()) { setMvccReadPoint(other.getMvccReadPoint()); } + if (other.hasCursor()) { + mergeCursor(other.getCursor()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -22156,6 +22834,169 @@ public final class ClientProtos { onChanged(); return this; } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor cursor_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder> cursorBuilder_; 
+ /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public boolean hasCursor() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor getCursor() { + if (cursorBuilder_ == null) { + return cursor_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.getDefaultInstance() : cursor_; + } else { + return cursorBuilder_.getMessage(); + } + } + /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public Builder setCursor(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor value) { + if (cursorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cursor_ = value; + onChanged(); + } else { + cursorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000800; + return this; + } + /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public Builder setCursor( + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder builderForValue) { + if (cursorBuilder_ == null) { + cursor_ = builderForValue.build(); + onChanged(); + } else { + cursorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000800; + return this; + } + /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public Builder mergeCursor(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor value) { + if (cursorBuilder_ == null) { + if (((bitField0_ & 0x00000800) == 0x00000800) && + cursor_ != null && + cursor_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.getDefaultInstance()) { + cursor_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.newBuilder(cursor_).mergeFrom(value).buildPartial(); + } else { + cursor_ = value; + } + onChanged(); + } else { + cursorBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000800; + return this; + } + /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public Builder clearCursor() { + if (cursorBuilder_ == null) { + cursor_ = null; + onChanged(); + } else { + cursorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000800); + return this; + } + /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder getCursorBuilder() { + bitField0_ |= 0x00000800; + onChanged(); + return getCursorFieldBuilder().getBuilder(); + } + /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder getCursorOrBuilder() { + if (cursorBuilder_ != null) { + return cursorBuilder_.getMessageOrBuilder(); + } else { + return cursor_ == null ? + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.getDefaultInstance() : cursor_; + } + } + /** + *
+       * If the Scan need cursor, return the row key we are scanning in heartbeat message.
+       * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
+       * 
+ * + * optional .hbase.pb.Cursor cursor = 12; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder> + getCursorFieldBuilder() { + if (cursorBuilder_ == null) { + cursorBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder>( + getCursor(), + getParentForChildren(), + isClean()); + cursor_ = null; + } + return cursorBuilder_; + } public final Builder setUnknownFields( final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); @@ -40988,6 +41829,11 @@ public final class ClientProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_ScanRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_Cursor_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_Cursor_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_ScanResponse_descriptor; private static final @@ -41157,7 +42003,7 @@ public final class ClientProtos { "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" + "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" + "\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " + - "\001(\010\"\203\006\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." + + "\001(\010\"\246\006\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." 
+ "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" + "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" + "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" + @@ -41175,99 +42021,101 @@ public final class ClientProtos { "nt\030\024 \001(\004:\0010\022\037\n\021include_start_row\030\025 \001(\010:\004" + "true\022\037\n\020include_stop_row\030\026 \001(\010:\005false\0222\n" + "\010readType\030\027 \001(\0162\027.hbase.pb.Scan.ReadType" + - ":\007DEFAULT\".\n\010ReadType\022\013\n\007DEFAULT\020\000\022\n\n\006ST" + - "REAM\020\001\022\t\n\005PREAD\020\002\"\300\002\n\013ScanRequest\022)\n\006reg" + - "ion\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034\n\004" + - "scan\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_id" + - "\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclose_" + - "scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027" + - "client_handles_partials\030\007 \001(\010\022!\n\031client_", - "handles_heartbeats\030\010 \001(\010\022\032\n\022track_scan_m" + - "etrics\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\022\030\n\rli" + - "mit_of_rows\030\013 \001(\r:\0010\"\266\002\n\014ScanResponse\022\030\n" + - "\020cells_per_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 " + - "\001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022!" + - "\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022\r\n\005sta" + - "le\030\006 \001(\010\022\037\n\027partial_flag_per_result\030\007 \003(" + - "\010\022\036\n\026more_results_in_region\030\010 \001(\010\022\031\n\021hea" + - "rtbeat_message\030\t \001(\010\022+\n\014scan_metrics\030\n \001" + - "(\0132\025.hbase.pb.ScanMetrics\022\032\n\017mvcc_read_p", - "oint\030\013 \001(\004:\0010\"\240\002\n\024BulkLoadHFileRequest\022)" + - "\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifie" + - "r\022>\n\013family_path\030\002 \003(\0132).hbase.pb.BulkLo" + - "adHFileRequest.FamilyPath\022\026\n\016assign_seq_" + - "num\030\003 \001(\010\022+\n\010fs_token\030\004 \001(\0132\031.hbase.pb.D" + - "elegationToken\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tco" + - "py_file\030\006 \001(\010:\005false\032*\n\nFamilyPath\022\016\n\006fa" + - "mily\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFil" + - "eResponse\022\016\n\006loaded\030\001 \002(\010\"V\n\017DelegationT" + - "oken\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010password\030\002 \001", - "(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n\026Pre" + - "pareBulkLoadRequest\022\'\n\ntable_name\030\001 \002(\0132" + - "\023.hbase.pb.TableName\022)\n\006region\030\002 \001(\0132\031.h" + - "base.pb.RegionSpecifier\"-\n\027PrepareBulkLo" + - "adResponse\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026Cleanu" + - "pBulkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006" + - "region\030\002 \001(\0132\031.hbase.pb.RegionSpecifier\"" + - "\031\n\027CleanupBulkLoadResponse\"a\n\026Coprocesso" + - "rServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_nam" + - "e\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007request\030", - "\004 \002(\014\"B\n\030CoprocessorServiceResult\022&\n\005val" + - "ue\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"v\n\031Cop" + - "rocessorServiceRequest\022)\n\006region\030\001 \002(\0132\031" + - ".hbase.pb.RegionSpecifier\022.\n\004call\030\002 \002(\0132" + - " 
.hbase.pb.CoprocessorServiceCall\"o\n\032Cop" + - "rocessorServiceResponse\022)\n\006region\030\001 \002(\0132" + - "\031.hbase.pb.RegionSpecifier\022&\n\005value\030\002 \002(" + - "\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Action\022\r\n" + - "\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.hbase.p" + - "b.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.", - "Get\0226\n\014service_call\030\004 \001(\0132 .hbase.pb.Cop" + - "rocessorServiceCall\"k\n\014RegionAction\022)\n\006r" + - "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\016" + - "\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase.p" + - "b.Action\"c\n\017RegionLoadStats\022\027\n\014memstoreL" + - "oad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035" + - "\n\022compactionPressure\030\003 \001(\005:\0010\"j\n\024MultiRe" + - "gionLoadStats\022)\n\006region\030\001 \003(\0132\031.hbase.pb" + - ".RegionSpecifier\022\'\n\004stat\030\002 \003(\0132\031.hbase.p" + - "b.RegionLoadStats\"\336\001\n\021ResultOrException\022", - "\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.p" + - "b.Result\022*\n\texception\030\003 \001(\0132\027.hbase.pb.N" + - "ameBytesPair\022:\n\016service_result\030\004 \001(\0132\".h" + - "base.pb.CoprocessorServiceResult\0220\n\tload" + - "Stats\030\005 \001(\0132\031.hbase.pb.RegionLoadStatsB\002" + - "\030\001\"x\n\022RegionActionResult\0226\n\021resultOrExce" + - "ption\030\001 \003(\0132\033.hbase.pb.ResultOrException" + - "\022*\n\texception\030\002 \001(\0132\027.hbase.pb.NameBytes" + - "Pair\"x\n\014MultiRequest\022,\n\014regionAction\030\001 \003" + - "(\0132\026.hbase.pb.RegionAction\022\022\n\nnonceGroup", - "\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb.Con" + - "dition\"\226\001\n\rMultiResponse\0228\n\022regionAction" + - "Result\030\001 \003(\0132\034.hbase.pb.RegionActionResu" + - "lt\022\021\n\tprocessed\030\002 \001(\010\0228\n\020regionStatistic" + - "s\030\003 \001(\0132\036.hbase.pb.MultiRegionLoadStats*" + - "\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\001" + - "2\263\005\n\rClientService\0222\n\003Get\022\024.hbase.pb.Get" + - "Request\032\025.hbase.pb.GetResponse\022;\n\006Mutate" + - "\022\027.hbase.pb.MutateRequest\032\030.hbase.pb.Mut" + - "ateResponse\0225\n\004Scan\022\025.hbase.pb.ScanReque", - "st\032\026.hbase.pb.ScanResponse\022P\n\rBulkLoadHF" + - "ile\022\036.hbase.pb.BulkLoadHFileRequest\032\037.hb" + - "ase.pb.BulkLoadHFileResponse\022V\n\017PrepareB" + - "ulkLoad\022 .hbase.pb.PrepareBulkLoadReques" + - "t\032!.hbase.pb.PrepareBulkLoadResponse\022V\n\017" + - "CleanupBulkLoad\022 .hbase.pb.CleanupBulkLo" + - "adRequest\032!.hbase.pb.CleanupBulkLoadResp" + - "onse\022X\n\013ExecService\022#.hbase.pb.Coprocess" + - "orServiceRequest\032$.hbase.pb.CoprocessorS" + - "erviceResponse\022d\n\027ExecRegionServerServic", - "e\022#.hbase.pb.CoprocessorServiceRequest\032$" + - ".hbase.pb.CoprocessorServiceResponse\0228\n\005" + - "Multi\022\026.hbase.pb.MultiRequest\032\027.hbase.pb" + - ".MultiResponseBI\n1org.apache.hadoop.hbas" + - "e.shaded.protobuf.generatedB\014ClientProto" + - "sH\001\210\001\001\240\001\001" + ":\007DEFAULT\022!\n\022need_cursor_result\030\030 \001(\010:\005f" + + "alse\".\n\010ReadType\022\013\n\007DEFAULT\020\000\022\n\n\006STREAM\020" + + "\001\022\t\n\005PREAD\020\002\"\300\002\n\013ScanRequest\022)\n\006region\030\001" + + " 
\001(\0132\031.hbase.pb.RegionSpecifier\022\034\n\004scan\030" + + "\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 \001(" + + "\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclose_scann" + + "er\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027clien", + "t_handles_partials\030\007 \001(\010\022!\n\031client_handl" + + "es_heartbeats\030\010 \001(\010\022\032\n\022track_scan_metric" + + "s\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\022\030\n\rlimit_o" + + "f_rows\030\013 \001(\r:\0010\"\025\n\006Cursor\022\013\n\003row\030\001 \001(\014\"\330" + + "\002\n\014ScanResponse\022\030\n\020cells_per_result\030\001 \003(" + + "\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 " + + "\001(\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbas" + + "e.pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_fl" + + "ag_per_result\030\007 \003(\010\022\036\n\026more_results_in_r" + + "egion\030\010 \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+", + "\n\014scan_metrics\030\n \001(\0132\025.hbase.pb.ScanMetr" + + "ics\022\032\n\017mvcc_read_point\030\013 \001(\004:\0010\022 \n\006curso" + + "r\030\014 \001(\0132\020.hbase.pb.Cursor\"\240\002\n\024BulkLoadHF" + + "ileRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + + "gionSpecifier\022>\n\013family_path\030\002 \003(\0132).hba" + + "se.pb.BulkLoadHFileRequest.FamilyPath\022\026\n" + + "\016assign_seq_num\030\003 \001(\010\022+\n\010fs_token\030\004 \001(\0132" + + "\031.hbase.pb.DelegationToken\022\022\n\nbulk_token" + + "\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(\010:\005false\032*\n\nFami" + + "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025", + "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"V\n" + + "\017DelegationToken\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010" + + "password\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030" + + "\004 \001(\t\"l\n\026PrepareBulkLoadRequest\022\'\n\ntable" + + "_name\030\001 \002(\0132\023.hbase.pb.TableName\022)\n\006regi" + + "on\030\002 \001(\0132\031.hbase.pb.RegionSpecifier\"-\n\027P" + + "repareBulkLoadResponse\022\022\n\nbulk_token\030\001 \002" + + "(\t\"W\n\026CleanupBulkLoadRequest\022\022\n\nbulk_tok" + + "en\030\001 \002(\t\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Regi" + + "onSpecifier\"\031\n\027CleanupBulkLoadResponse\"a", + "\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n" + + "\014service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t" + + "\022\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceR" + + "esult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameByte" + + "sPair\"v\n\031CoprocessorServiceRequest\022)\n\006re" + + "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n" + + "\004call\030\002 \002(\0132 .hbase.pb.CoprocessorServic" + + "eCall\"o\n\032CoprocessorServiceResponse\022)\n\006r" + + "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&" + + "\n\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226", + "\001\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001" + + "(\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\013" + + "2\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 ." 
+ + "hbase.pb.CoprocessorServiceCall\"k\n\014Regio" + + "nAction\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regio" + + "nSpecifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003" + + "(\0132\020.hbase.pb.Action\"c\n\017RegionLoadStats\022" + + "\027\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupanc" + + "y\030\002 \001(\005:\0010\022\035\n\022compactionPressure\030\003 \001(\005:\001" + + "0\"j\n\024MultiRegionLoadStats\022)\n\006region\030\001 \003(", + "\0132\031.hbase.pb.RegionSpecifier\022\'\n\004stat\030\002 \003" + + "(\0132\031.hbase.pb.RegionLoadStats\"\336\001\n\021Result" + + "OrException\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001" + + "(\0132\020.hbase.pb.Result\022*\n\texception\030\003 \001(\0132" + + "\027.hbase.pb.NameBytesPair\022:\n\016service_resu" + + "lt\030\004 \001(\0132\".hbase.pb.CoprocessorServiceRe" + + "sult\0220\n\tloadStats\030\005 \001(\0132\031.hbase.pb.Regio" + + "nLoadStatsB\002\030\001\"x\n\022RegionActionResult\0226\n\021" + + "resultOrException\030\001 \003(\0132\033.hbase.pb.Resul" + + "tOrException\022*\n\texception\030\002 \001(\0132\027.hbase.", + "pb.NameBytesPair\"x\n\014MultiRequest\022,\n\014regi" + + "onAction\030\001 \003(\0132\026.hbase.pb.RegionAction\022\022" + + "\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023." + + "hbase.pb.Condition\"\226\001\n\rMultiResponse\0228\n\022" + + "regionActionResult\030\001 \003(\0132\034.hbase.pb.Regi" + + "onActionResult\022\021\n\tprocessed\030\002 \001(\010\0228\n\020reg" + + "ionStatistics\030\003 \001(\0132\036.hbase.pb.MultiRegi" + + "onLoadStats*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014" + + "\n\010TIMELINE\020\0012\263\005\n\rClientService\0222\n\003Get\022\024." + + "hbase.pb.GetRequest\032\025.hbase.pb.GetRespon", + "se\022;\n\006Mutate\022\027.hbase.pb.MutateRequest\032\030." + + "hbase.pb.MutateResponse\0225\n\004Scan\022\025.hbase." + + "pb.ScanRequest\032\026.hbase.pb.ScanResponse\022P" + + "\n\rBulkLoadHFile\022\036.hbase.pb.BulkLoadHFile" + + "Request\032\037.hbase.pb.BulkLoadHFileResponse" + + "\022V\n\017PrepareBulkLoad\022 .hbase.pb.PrepareBu" + + "lkLoadRequest\032!.hbase.pb.PrepareBulkLoad" + + "Response\022V\n\017CleanupBulkLoad\022 .hbase.pb.C" + + "leanupBulkLoadRequest\032!.hbase.pb.Cleanup" + + "BulkLoadResponse\022X\n\013ExecService\022#.hbase.", + "pb.CoprocessorServiceRequest\032$.hbase.pb." + + "CoprocessorServiceResponse\022d\n\027ExecRegion" + + "ServerService\022#.hbase.pb.CoprocessorServ" + + "iceRequest\032$.hbase.pb.CoprocessorService" + + "Response\0228\n\005Multi\022\026.hbase.pb.MultiReques" + + "t\032\027.hbase.pb.MultiResponseBI\n1org.apache" + + ".hadoop.hbase.shaded.protobuf.generatedB" + + "\014ClientProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -41369,21 +42217,27 @@ public final class ClientProtos { internal_static_hbase_pb_Scan_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_Scan_descriptor, - new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", "IncludeStartRow", "IncludeStopRow", "ReadType", }); + new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", "IncludeStartRow", "IncludeStopRow", "ReadType", "NeedCursorResult", }); internal_static_hbase_pb_ScanRequest_descriptor = getDescriptor().getMessageTypes().get(12); internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ScanRequest_descriptor, new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", "ClientHandlesPartials", "ClientHandlesHeartbeats", "TrackScanMetrics", "Renew", "LimitOfRows", }); - internal_static_hbase_pb_ScanResponse_descriptor = + internal_static_hbase_pb_Cursor_descriptor = getDescriptor().getMessageTypes().get(13); + internal_static_hbase_pb_Cursor_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_Cursor_descriptor, + new java.lang.String[] { "Row", }); + internal_static_hbase_pb_ScanResponse_descriptor = + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_ScanResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ScanResponse_descriptor, - new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", "PartialFlagPerResult", "MoreResultsInRegion", "HeartbeatMessage", "ScanMetrics", "MvccReadPoint", }); + new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", "PartialFlagPerResult", "MoreResultsInRegion", "HeartbeatMessage", "ScanMetrics", "MvccReadPoint", "Cursor", }); internal_static_hbase_pb_BulkLoadHFileRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_BulkLoadHFileRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_BulkLoadHFileRequest_descriptor, @@ -41395,109 +42249,109 @@ public final class ClientProtos { internal_static_hbase_pb_BulkLoadHFileRequest_FamilyPath_descriptor, new java.lang.String[] { "Family", "Path", }); internal_static_hbase_pb_BulkLoadHFileResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_BulkLoadHFileResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_BulkLoadHFileResponse_descriptor, new 
java.lang.String[] { "Loaded", }); internal_static_hbase_pb_DelegationToken_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_DelegationToken_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DelegationToken_descriptor, new java.lang.String[] { "Identifier", "Password", "Kind", "Service", }); internal_static_hbase_pb_PrepareBulkLoadRequest_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_PrepareBulkLoadRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_PrepareBulkLoadRequest_descriptor, new java.lang.String[] { "TableName", "Region", }); internal_static_hbase_pb_PrepareBulkLoadResponse_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(19); internal_static_hbase_pb_PrepareBulkLoadResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_PrepareBulkLoadResponse_descriptor, new java.lang.String[] { "BulkToken", }); internal_static_hbase_pb_CleanupBulkLoadRequest_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_CleanupBulkLoadRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CleanupBulkLoadRequest_descriptor, new java.lang.String[] { "BulkToken", "Region", }); internal_static_hbase_pb_CleanupBulkLoadResponse_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_CleanupBulkLoadResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CleanupBulkLoadResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_CoprocessorServiceCall_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_CoprocessorServiceCall_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CoprocessorServiceCall_descriptor, new java.lang.String[] { "Row", "ServiceName", "MethodName", "Request", }); internal_static_hbase_pb_CoprocessorServiceResult_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_CoprocessorServiceResult_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CoprocessorServiceResult_descriptor, new java.lang.String[] { "Value", }); internal_static_hbase_pb_CoprocessorServiceRequest_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_CoprocessorServiceRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CoprocessorServiceRequest_descriptor, new java.lang.String[] { "Region", "Call", }); internal_static_hbase_pb_CoprocessorServiceResponse_descriptor = - getDescriptor().getMessageTypes().get(24); + 
getDescriptor().getMessageTypes().get(25); internal_static_hbase_pb_CoprocessorServiceResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_CoprocessorServiceResponse_descriptor, new java.lang.String[] { "Region", "Value", }); internal_static_hbase_pb_Action_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(26); internal_static_hbase_pb_Action_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_Action_descriptor, new java.lang.String[] { "Index", "Mutation", "Get", "ServiceCall", }); internal_static_hbase_pb_RegionAction_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(27); internal_static_hbase_pb_RegionAction_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RegionAction_descriptor, new java.lang.String[] { "Region", "Atomic", "Action", }); internal_static_hbase_pb_RegionLoadStats_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(28); internal_static_hbase_pb_RegionLoadStats_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RegionLoadStats_descriptor, new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", "CompactionPressure", }); internal_static_hbase_pb_MultiRegionLoadStats_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(29); internal_static_hbase_pb_MultiRegionLoadStats_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MultiRegionLoadStats_descriptor, new java.lang.String[] { "Region", "Stat", }); internal_static_hbase_pb_ResultOrException_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(30); internal_static_hbase_pb_ResultOrException_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ResultOrException_descriptor, new java.lang.String[] { "Index", "Result", "Exception", "ServiceResult", "LoadStats", }); internal_static_hbase_pb_RegionActionResult_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(31); internal_static_hbase_pb_RegionActionResult_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RegionActionResult_descriptor, new java.lang.String[] { "ResultOrException", "Exception", }); internal_static_hbase_pb_MultiRequest_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(32); internal_static_hbase_pb_MultiRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MultiRequest_descriptor, new java.lang.String[] { "RegionAction", "NonceGroup", "Condition", }); internal_static_hbase_pb_MultiResponse_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(33); internal_static_hbase_pb_MultiResponse_fieldAccessorTable = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MultiResponse_descriptor, diff --git a/hbase-protocol-shaded/src/main/protobuf/Client.proto b/hbase-protocol-shaded/src/main/protobuf/Client.proto index 82bfb70a0f0..14d2b4c071c 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Client.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Client.proto @@ -259,13 +259,13 @@ message Scan { optional uint64 mvcc_read_point = 20 [default = 0]; optional bool include_start_row = 21 [default = true]; optional bool include_stop_row = 22 [default = false]; - enum ReadType { DEFAULT = 0; STREAM = 1; PREAD = 2; } optional ReadType readType = 23 [default = DEFAULT]; + optional bool need_cursor_result = 24 [default = false]; } /** @@ -294,6 +294,14 @@ message ScanRequest { optional uint32 limit_of_rows = 11 [default = 0]; } +/** +* Scan cursor to tell client where we are scanning. +* + */ +message Cursor { + optional bytes row = 1; +} + /** * The scan response. If there are no more results, more_results will * be false. If it is not specified, it means there are more. @@ -346,6 +354,10 @@ message ScanResponse { // make use of this mvcc_read_point when restarting a scanner to get a consistent view // of a row. optional uint64 mvcc_read_point = 11 [default = 0]; + + // If the Scan need cursor, return the row key we are scanning in heartbeat message. + // If the Scan doesn't need a cursor, don't set this field to reduce network IO. + optional Cursor cursor = 12; } /** diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index 1bb57c43f40..e25064faff4 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -14267,6 +14267,16 @@ public final class ClientProtos { * optional .hbase.pb.Scan.ReadType readType = 23 [default = DEFAULT]; */ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.ReadType getReadType(); + + // optional bool need_cursor_result = 24 [default = false]; + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + boolean hasNeedCursorResult(); + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + boolean getNeedCursorResult(); } /** * Protobuf type {@code hbase.pb.Scan} @@ -14482,6 +14492,11 @@ public final class ClientProtos { } break; } + case 192: { + bitField0_ |= 0x00100000; + needCursorResult_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -15070,6 +15085,22 @@ public final class ClientProtos { return readType_; } + // optional bool need_cursor_result = 24 [default = false]; + public static final int NEED_CURSOR_RESULT_FIELD_NUMBER = 24; + private boolean needCursorResult_; + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + public boolean hasNeedCursorResult() { + return ((bitField0_ & 0x00100000) == 0x00100000); + } + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + public boolean getNeedCursorResult() { + return needCursorResult_; + } + private void initFields() { column_ = java.util.Collections.emptyList(); attribute_ = java.util.Collections.emptyList(); @@ -15094,6 +15125,7 @@ public final class ClientProtos { includeStartRow_ = true; includeStopRow_ = false; readType_ = 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.ReadType.DEFAULT; + needCursorResult_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -15200,6 +15232,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00080000) == 0x00080000)) { output.writeEnum(23, readType_.getNumber()); } + if (((bitField0_ & 0x00100000) == 0x00100000)) { + output.writeBool(24, needCursorResult_); + } getUnknownFields().writeTo(output); } @@ -15301,6 +15336,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeEnumSize(23, readType_.getNumber()); } + if (((bitField0_ & 0x00100000) == 0x00100000)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(24, needCursorResult_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -15430,6 +15469,11 @@ public final class ClientProtos { result = result && (getReadType() == other.getReadType()); } + result = result && (hasNeedCursorResult() == other.hasNeedCursorResult()); + if (hasNeedCursorResult()) { + result = result && (getNeedCursorResult() + == other.getNeedCursorResult()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -15535,6 +15579,10 @@ public final class ClientProtos { hash = (37 * hash) + READTYPE_FIELD_NUMBER; hash = (53 * hash) + hashEnum(getReadType()); } + if (hasNeedCursorResult()) { + hash = (37 * hash) + NEED_CURSOR_RESULT_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getNeedCursorResult()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -15726,6 +15774,8 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00200000); readType_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.ReadType.DEFAULT; bitField0_ = (bitField0_ & ~0x00400000); + needCursorResult_ = false; + bitField0_ = (bitField0_ & ~0x00800000); return this; } @@ -15869,6 +15919,10 @@ public final class ClientProtos { to_bitField0_ |= 0x00080000; } result.readType_ = readType_; + if (((from_bitField0_ & 0x00800000) == 0x00800000)) { + to_bitField0_ |= 0x00100000; + } + result.needCursorResult_ = needCursorResult_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -16023,6 +16077,9 @@ public final class ClientProtos { if (other.hasReadType()) { setReadType(other.getReadType()); } + if (other.hasNeedCursorResult()) { + setNeedCursorResult(other.getNeedCursorResult()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -17650,6 +17707,39 @@ public final class ClientProtos { return this; } + // optional bool need_cursor_result = 24 [default = false]; + private boolean needCursorResult_ ; + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + public boolean hasNeedCursorResult() { + return ((bitField0_ & 0x00800000) == 0x00800000); + } + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + public boolean getNeedCursorResult() { + return needCursorResult_; + } + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + public Builder setNeedCursorResult(boolean value) { + bitField0_ |= 0x00800000; + needCursorResult_ = value; + onChanged(); + return this; + } + /** + * optional bool need_cursor_result = 24 [default = false]; + */ + public Builder clearNeedCursorResult() { + bitField0_ = (bitField0_ & ~0x00800000); + needCursorResult_ = false; + onChanged(); + return this; + } + // 
@@protoc_insertion_point(builder_scope:hbase.pb.Scan) } @@ -19299,6 +19389,453 @@ public final class ClientProtos { // @@protoc_insertion_point(class_scope:hbase.pb.ScanRequest) } + public interface CursorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional bytes row = 1; + /** + * optional bytes row = 1; + */ + boolean hasRow(); + /** + * optional bytes row = 1; + */ + com.google.protobuf.ByteString getRow(); + } + /** + * Protobuf type {@code hbase.pb.Cursor} + * + *
+   **
+   * Scan cursor to tell the client where the server is scanning.
+   * 
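+   * A minimal client-side sketch of consuming cursor results. It assumes an
+   * existing Table handle named "table" and uses only the client API added
+   * by this patch (Scan#setNeedCursorResult, Result#isCursor,
+   * Result#getCursor):
+   *
+   *   Scan scan = new Scan();
+   *   scan.setNeedCursorResult(true);
+   *   try (ResultScanner scanner = table.getScanner(scan)) {
+   *     for (Result result : scanner) {
+   *       if (result.isCursor()) {
+   *         // Heartbeat carrying only a cursor: remember how far the
+   *         // server has scanned and keep calling next().
+   *         byte[] lastScannedRow = result.getCursor().getRow();
+   *         continue;
+   *       }
+   *       // A normal Result holding cells.
+   *     }
+   *   }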
+ */ + public static final class Cursor extends + com.google.protobuf.GeneratedMessage + implements CursorOrBuilder { + // Use Cursor.newBuilder() to construct. + private Cursor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Cursor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Cursor defaultInstance; + public static Cursor getDefaultInstance() { + return defaultInstance; + } + + public Cursor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Cursor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + row_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Cursor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Cursor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional bytes row = 1; + public static final int ROW_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString row_; + /** + * optional bytes row = 1; + */ + public boolean hasRow() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bytes row = 1; + */ + public com.google.protobuf.ByteString getRow() { + return row_; + } + + private void initFields() { + row_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != 
-1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, row_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, row_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor) obj; + + boolean result = true; + result = result && (hasRow() == other.hasRow()); + if (hasRow()) { + result = result && getRow() + .equals(other.getRow()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRow()) { + hash = (37 * hash) + ROW_FIELD_NUMBER; + hash = (53 * hash) + getRow().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor 
parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.Cursor} + * + *
+     **
+     * Scan cursor to tell the client where the server is scanning.
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + row_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor build() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.row_ = row_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance()) return this; + if (other.hasRow()) { + setRow(other.getRow()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional bytes row = 1; + private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes row = 1; + */ + public boolean hasRow() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bytes row = 1; + */ + public com.google.protobuf.ByteString getRow() { + return row_; + } + /** + * optional bytes row = 1; + */ + public Builder setRow(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + row_ = value; + onChanged(); + return this; + } + /** + * optional bytes row = 1; + */ + public Builder clearRow() { + bitField0_ = (bitField0_ & ~0x00000001); + row_ = getDefaultInstance().getRow(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.Cursor) + } + + static { + defaultInstance = new Cursor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.Cursor) + } + public interface ScanResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -19584,6 +20121,35 @@ public final class ClientProtos { *
*/ long getMvccReadPoint(); + + // optional .hbase.pb.Cursor cursor = 12; + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+     * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+     * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+     * 
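+     * For example, a consumer of the response might guard on this flag
+     * before touching the message (a sketch; "response" stands for any
+     * ScanResponse instance):
+     *
+     *   if (response.hasCursor()) {
+     *     byte[] row = response.getCursor().getRow().toByteArray();
+     *     // Surface the row to the application as a cursor-only Result.
+     *   }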
+ */ + boolean hasCursor(); + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+     * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+     * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor getCursor(); + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+     * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+     * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder getCursorOrBuilder(); } /** * Protobuf type {@code hbase.pb.ScanResponse} @@ -19740,6 +20306,19 @@ public final class ClientProtos { mvccReadPoint_ = input.readUInt64(); break; } + case 98: { + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder subBuilder = null; + if (((bitField0_ & 0x00000100) == 0x00000100)) { + subBuilder = cursor_.toBuilder(); + } + cursor_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(cursor_); + cursor_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000100; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -20151,6 +20730,43 @@ public final class ClientProtos { return mvccReadPoint_; } + // optional .hbase.pb.Cursor cursor = 12; + public static final int CURSOR_FIELD_NUMBER = 12; + private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor cursor_; + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+     * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+     * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+     * 
+ */ + public boolean hasCursor() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+     * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+     * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor getCursor() { + return cursor_; + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+     * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+     * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder getCursorOrBuilder() { + return cursor_; + } + private void initFields() { cellsPerResult_ = java.util.Collections.emptyList(); scannerId_ = 0L; @@ -20163,6 +20779,7 @@ public final class ClientProtos { heartbeatMessage_ = false; scanMetrics_ = org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics.getDefaultInstance(); mvccReadPoint_ = 0L; + cursor_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -20209,6 +20826,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeUInt64(11, mvccReadPoint_); } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(12, cursor_); + } getUnknownFields().writeTo(output); } @@ -20269,6 +20889,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(11, mvccReadPoint_); } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(12, cursor_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -20338,6 +20962,11 @@ public final class ClientProtos { result = result && (getMvccReadPoint() == other.getMvccReadPoint()); } + result = result && (hasCursor() == other.hasCursor()); + if (hasCursor()) { + result = result && getCursor() + .equals(other.getCursor()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -20395,6 +21024,10 @@ public final class ClientProtos { hash = (37 * hash) + MVCC_READ_POINT_FIELD_NUMBER; hash = (53 * hash) + hashLong(getMvccReadPoint()); } + if (hasCursor()) { + hash = (37 * hash) + CURSOR_FIELD_NUMBER; + hash = (53 * hash) + getCursor().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -20504,6 +21137,7 @@ public final class ClientProtos { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getResultsFieldBuilder(); getScanMetricsFieldBuilder(); + getCursorFieldBuilder(); } } private static Builder create() { @@ -20542,6 +21176,12 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000200); mvccReadPoint_ = 0L; bitField0_ = (bitField0_ & ~0x00000400); + if (cursorBuilder_ == null) { + cursor_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance(); + } else { + cursorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000800); return this; } @@ -20625,6 +21265,14 @@ public final class ClientProtos { to_bitField0_ |= 0x00000080; } result.mvccReadPoint_ = mvccReadPoint_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000100; + } + if (cursorBuilder_ == null) { + result.cursor_ = cursor_; + } else { + result.cursor_ = cursorBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -20711,6 +21359,9 @@ public final class ClientProtos { if (other.hasMvccReadPoint()) { setMvccReadPoint(other.getMvccReadPoint()); } + if (other.hasCursor()) { + mergeCursor(other.getCursor()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -21836,6 +22487,168 @@ public final class ClientProtos { return this; } + // optional .hbase.pb.Cursor cursor = 12; + private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor cursor_ = 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder> cursorBuilder_; + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
+ */ + public boolean hasCursor() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor getCursor() { + if (cursorBuilder_ == null) { + return cursor_; + } else { + return cursorBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
+ */ + public Builder setCursor(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor value) { + if (cursorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cursor_ = value; + onChanged(); + } else { + cursorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000800; + return this; + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
+ */ + public Builder setCursor( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder builderForValue) { + if (cursorBuilder_ == null) { + cursor_ = builderForValue.build(); + onChanged(); + } else { + cursorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000800; + return this; + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
+ */ + public Builder mergeCursor(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor value) { + if (cursorBuilder_ == null) { + if (((bitField0_ & 0x00000800) == 0x00000800) && + cursor_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance()) { + cursor_ = + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.newBuilder(cursor_).mergeFrom(value).buildPartial(); + } else { + cursor_ = value; + } + onChanged(); + } else { + cursorBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000800; + return this; + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
+ */ + public Builder clearCursor() { + if (cursorBuilder_ == null) { + cursor_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance(); + onChanged(); + } else { + cursorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000800); + return this; + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder getCursorBuilder() { + bitField0_ |= 0x00000800; + onChanged(); + return getCursorFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder getCursorOrBuilder() { + if (cursorBuilder_ != null) { + return cursorBuilder_.getMessageOrBuilder(); + } else { + return cursor_; + } + } + /** + * optional .hbase.pb.Cursor cursor = 12; + * + *
+       * If the Scan needs a cursor, return the row key the server is scanning in the heartbeat message.
+       * If the Scan doesn't need a cursor, leave this field unset to reduce network IO.
+       * 
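+       * On the producing side, a region server could populate the field
+       * through this builder, e.g. (a sketch; "currentRow" stands for the
+       * row the scanner is currently positioned on):
+       *
+       *   ScanResponse.Builder response = ScanResponse.newBuilder()
+       *       .setHeartbeatMessage(true)
+       *       .setCursor(Cursor.newBuilder()
+       *           .setRow(ByteString.copyFrom(currentRow)));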
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder> + getCursorFieldBuilder() { + if (cursorBuilder_ == null) { + cursorBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder>( + cursor_, + getParentForChildren(), + isClean()); + cursor_ = null; + } + return cursorBuilder_; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.ScanResponse) } @@ -40063,6 +40876,11 @@ public final class ClientProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_ScanRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_Cursor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_Cursor_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_ScanResponse_descriptor; private static @@ -40232,7 +41050,7 @@ public final class ClientProtos { "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" + "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" + "\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " + - "\001(\010\"\203\006\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." + + "\001(\010\"\246\006\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." + "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" + "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" + "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" + @@ -40250,99 +41068,101 @@ public final class ClientProtos { "nt\030\024 \001(\004:\0010\022\037\n\021include_start_row\030\025 \001(\010:\004" + "true\022\037\n\020include_stop_row\030\026 \001(\010:\005false\0222\n" + "\010readType\030\027 \001(\0162\027.hbase.pb.Scan.ReadType" + - ":\007DEFAULT\".\n\010ReadType\022\013\n\007DEFAULT\020\000\022\n\n\006ST" + - "REAM\020\001\022\t\n\005PREAD\020\002\"\300\002\n\013ScanRequest\022)\n\006reg" + - "ion\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034\n\004" + - "scan\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_id" + - "\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclose_" + - "scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027" + - "client_handles_partials\030\007 \001(\010\022!\n\031client_", - "handles_heartbeats\030\010 \001(\010\022\032\n\022track_scan_m" + - "etrics\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\022\030\n\rli" + - "mit_of_rows\030\013 \001(\r:\0010\"\266\002\n\014ScanResponse\022\030\n" + - "\020cells_per_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 " + - "\001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022!" 
+ - "\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022\r\n\005sta" + - "le\030\006 \001(\010\022\037\n\027partial_flag_per_result\030\007 \003(" + - "\010\022\036\n\026more_results_in_region\030\010 \001(\010\022\031\n\021hea" + - "rtbeat_message\030\t \001(\010\022+\n\014scan_metrics\030\n \001" + - "(\0132\025.hbase.pb.ScanMetrics\022\032\n\017mvcc_read_p", - "oint\030\013 \001(\004:\0010\"\240\002\n\024BulkLoadHFileRequest\022)" + - "\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifie" + - "r\022>\n\013family_path\030\002 \003(\0132).hbase.pb.BulkLo" + - "adHFileRequest.FamilyPath\022\026\n\016assign_seq_" + - "num\030\003 \001(\010\022+\n\010fs_token\030\004 \001(\0132\031.hbase.pb.D" + - "elegationToken\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tco" + - "py_file\030\006 \001(\010:\005false\032*\n\nFamilyPath\022\016\n\006fa" + - "mily\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFil" + - "eResponse\022\016\n\006loaded\030\001 \002(\010\"V\n\017DelegationT" + - "oken\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010password\030\002 \001", - "(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n\026Pre" + - "pareBulkLoadRequest\022\'\n\ntable_name\030\001 \002(\0132" + - "\023.hbase.pb.TableName\022)\n\006region\030\002 \001(\0132\031.h" + - "base.pb.RegionSpecifier\"-\n\027PrepareBulkLo" + - "adResponse\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026Cleanu" + - "pBulkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006" + - "region\030\002 \001(\0132\031.hbase.pb.RegionSpecifier\"" + - "\031\n\027CleanupBulkLoadResponse\"a\n\026Coprocesso" + - "rServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_nam" + - "e\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007request\030", - "\004 \002(\014\"B\n\030CoprocessorServiceResult\022&\n\005val" + - "ue\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"v\n\031Cop" + - "rocessorServiceRequest\022)\n\006region\030\001 \002(\0132\031" + - ".hbase.pb.RegionSpecifier\022.\n\004call\030\002 \002(\0132" + - " .hbase.pb.CoprocessorServiceCall\"o\n\032Cop" + - "rocessorServiceResponse\022)\n\006region\030\001 \002(\0132" + - "\031.hbase.pb.RegionSpecifier\022&\n\005value\030\002 \002(" + - "\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Action\022\r\n" + - "\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.hbase.p" + - "b.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.", - "Get\0226\n\014service_call\030\004 \001(\0132 .hbase.pb.Cop" + - "rocessorServiceCall\"k\n\014RegionAction\022)\n\006r" + - "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\016" + - "\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase.p" + - "b.Action\"c\n\017RegionLoadStats\022\027\n\014memstoreL" + - "oad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035" + - "\n\022compactionPressure\030\003 \001(\005:\0010\"j\n\024MultiRe" + - "gionLoadStats\022)\n\006region\030\001 \003(\0132\031.hbase.pb" + - ".RegionSpecifier\022\'\n\004stat\030\002 \003(\0132\031.hbase.p" + - "b.RegionLoadStats\"\336\001\n\021ResultOrException\022", - "\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.p" + - "b.Result\022*\n\texception\030\003 \001(\0132\027.hbase.pb.N" + - "ameBytesPair\022:\n\016service_result\030\004 \001(\0132\".h" + - "base.pb.CoprocessorServiceResult\0220\n\tload" + - "Stats\030\005 \001(\0132\031.hbase.pb.RegionLoadStatsB\002" + - 
"\030\001\"x\n\022RegionActionResult\0226\n\021resultOrExce" + - "ption\030\001 \003(\0132\033.hbase.pb.ResultOrException" + - "\022*\n\texception\030\002 \001(\0132\027.hbase.pb.NameBytes" + - "Pair\"x\n\014MultiRequest\022,\n\014regionAction\030\001 \003" + - "(\0132\026.hbase.pb.RegionAction\022\022\n\nnonceGroup", - "\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb.Con" + - "dition\"\226\001\n\rMultiResponse\0228\n\022regionAction" + - "Result\030\001 \003(\0132\034.hbase.pb.RegionActionResu" + - "lt\022\021\n\tprocessed\030\002 \001(\010\0228\n\020regionStatistic" + - "s\030\003 \001(\0132\036.hbase.pb.MultiRegionLoadStats*" + - "\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\001" + - "2\263\005\n\rClientService\0222\n\003Get\022\024.hbase.pb.Get" + - "Request\032\025.hbase.pb.GetResponse\022;\n\006Mutate" + - "\022\027.hbase.pb.MutateRequest\032\030.hbase.pb.Mut" + - "ateResponse\0225\n\004Scan\022\025.hbase.pb.ScanReque", - "st\032\026.hbase.pb.ScanResponse\022P\n\rBulkLoadHF" + - "ile\022\036.hbase.pb.BulkLoadHFileRequest\032\037.hb" + - "ase.pb.BulkLoadHFileResponse\022V\n\017PrepareB" + - "ulkLoad\022 .hbase.pb.PrepareBulkLoadReques" + - "t\032!.hbase.pb.PrepareBulkLoadResponse\022V\n\017" + - "CleanupBulkLoad\022 .hbase.pb.CleanupBulkLo" + - "adRequest\032!.hbase.pb.CleanupBulkLoadResp" + - "onse\022X\n\013ExecService\022#.hbase.pb.Coprocess" + - "orServiceRequest\032$.hbase.pb.CoprocessorS" + - "erviceResponse\022d\n\027ExecRegionServerServic", - "e\022#.hbase.pb.CoprocessorServiceRequest\032$" + - ".hbase.pb.CoprocessorServiceResponse\0228\n\005" + - "Multi\022\026.hbase.pb.MultiRequest\032\027.hbase.pb" + - ".MultiResponseBB\n*org.apache.hadoop.hbas" + - "e.protobuf.generatedB\014ClientProtosH\001\210\001\001\240" + - "\001\001" + ":\007DEFAULT\022!\n\022need_cursor_result\030\030 \001(\010:\005f" + + "alse\".\n\010ReadType\022\013\n\007DEFAULT\020\000\022\n\n\006STREAM\020" + + "\001\022\t\n\005PREAD\020\002\"\300\002\n\013ScanRequest\022)\n\006region\030\001" + + " \001(\0132\031.hbase.pb.RegionSpecifier\022\034\n\004scan\030" + + "\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 \001(" + + "\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclose_scann" + + "er\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027clien", + "t_handles_partials\030\007 \001(\010\022!\n\031client_handl" + + "es_heartbeats\030\010 \001(\010\022\032\n\022track_scan_metric" + + "s\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\022\030\n\rlimit_o" + + "f_rows\030\013 \001(\r:\0010\"\025\n\006Cursor\022\013\n\003row\030\001 \001(\014\"\330" + + "\002\n\014ScanResponse\022\030\n\020cells_per_result\030\001 \003(" + + "\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 " + + "\001(\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbas" + + "e.pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_fl" + + "ag_per_result\030\007 \003(\010\022\036\n\026more_results_in_r" + + "egion\030\010 \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+", + "\n\014scan_metrics\030\n \001(\0132\025.hbase.pb.ScanMetr" + + "ics\022\032\n\017mvcc_read_point\030\013 \001(\004:\0010\022 \n\006curso" + + "r\030\014 \001(\0132\020.hbase.pb.Cursor\"\240\002\n\024BulkLoadHF" + + "ileRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + + "gionSpecifier\022>\n\013family_path\030\002 \003(\0132).hba" + + 
"se.pb.BulkLoadHFileRequest.FamilyPath\022\026\n" + + "\016assign_seq_num\030\003 \001(\010\022+\n\010fs_token\030\004 \001(\0132" + + "\031.hbase.pb.DelegationToken\022\022\n\nbulk_token" + + "\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(\010:\005false\032*\n\nFami" + + "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025", + "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"V\n" + + "\017DelegationToken\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010" + + "password\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030" + + "\004 \001(\t\"l\n\026PrepareBulkLoadRequest\022\'\n\ntable" + + "_name\030\001 \002(\0132\023.hbase.pb.TableName\022)\n\006regi" + + "on\030\002 \001(\0132\031.hbase.pb.RegionSpecifier\"-\n\027P" + + "repareBulkLoadResponse\022\022\n\nbulk_token\030\001 \002" + + "(\t\"W\n\026CleanupBulkLoadRequest\022\022\n\nbulk_tok" + + "en\030\001 \002(\t\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Regi" + + "onSpecifier\"\031\n\027CleanupBulkLoadResponse\"a", + "\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n" + + "\014service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t" + + "\022\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceR" + + "esult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameByte" + + "sPair\"v\n\031CoprocessorServiceRequest\022)\n\006re" + + "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n" + + "\004call\030\002 \002(\0132 .hbase.pb.CoprocessorServic" + + "eCall\"o\n\032CoprocessorServiceResponse\022)\n\006r" + + "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&" + + "\n\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226", + "\001\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001" + + "(\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\013" + + "2\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 ." + + "hbase.pb.CoprocessorServiceCall\"k\n\014Regio" + + "nAction\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regio" + + "nSpecifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003" + + "(\0132\020.hbase.pb.Action\"c\n\017RegionLoadStats\022" + + "\027\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupanc" + + "y\030\002 \001(\005:\0010\022\035\n\022compactionPressure\030\003 \001(\005:\001" + + "0\"j\n\024MultiRegionLoadStats\022)\n\006region\030\001 \003(", + "\0132\031.hbase.pb.RegionSpecifier\022\'\n\004stat\030\002 \003" + + "(\0132\031.hbase.pb.RegionLoadStats\"\336\001\n\021Result" + + "OrException\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001" + + "(\0132\020.hbase.pb.Result\022*\n\texception\030\003 \001(\0132" + + "\027.hbase.pb.NameBytesPair\022:\n\016service_resu" + + "lt\030\004 \001(\0132\".hbase.pb.CoprocessorServiceRe" + + "sult\0220\n\tloadStats\030\005 \001(\0132\031.hbase.pb.Regio" + + "nLoadStatsB\002\030\001\"x\n\022RegionActionResult\0226\n\021" + + "resultOrException\030\001 \003(\0132\033.hbase.pb.Resul" + + "tOrException\022*\n\texception\030\002 \001(\0132\027.hbase.", + "pb.NameBytesPair\"x\n\014MultiRequest\022,\n\014regi" + + "onAction\030\001 \003(\0132\026.hbase.pb.RegionAction\022\022" + + "\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023." 
+ + "hbase.pb.Condition\"\226\001\n\rMultiResponse\0228\n\022" + + "regionActionResult\030\001 \003(\0132\034.hbase.pb.Regi" + + "onActionResult\022\021\n\tprocessed\030\002 \001(\010\0228\n\020reg" + + "ionStatistics\030\003 \001(\0132\036.hbase.pb.MultiRegi" + + "onLoadStats*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014" + + "\n\010TIMELINE\020\0012\263\005\n\rClientService\0222\n\003Get\022\024." + + "hbase.pb.GetRequest\032\025.hbase.pb.GetRespon", + "se\022;\n\006Mutate\022\027.hbase.pb.MutateRequest\032\030." + + "hbase.pb.MutateResponse\0225\n\004Scan\022\025.hbase." + + "pb.ScanRequest\032\026.hbase.pb.ScanResponse\022P" + + "\n\rBulkLoadHFile\022\036.hbase.pb.BulkLoadHFile" + + "Request\032\037.hbase.pb.BulkLoadHFileResponse" + + "\022V\n\017PrepareBulkLoad\022 .hbase.pb.PrepareBu" + + "lkLoadRequest\032!.hbase.pb.PrepareBulkLoad" + + "Response\022V\n\017CleanupBulkLoad\022 .hbase.pb.C" + + "leanupBulkLoadRequest\032!.hbase.pb.Cleanup" + + "BulkLoadResponse\022X\n\013ExecService\022#.hbase.", + "pb.CoprocessorServiceRequest\032$.hbase.pb." + + "CoprocessorServiceResponse\022d\n\027ExecRegion" + + "ServerService\022#.hbase.pb.CoprocessorServ" + + "iceRequest\032$.hbase.pb.CoprocessorService" + + "Response\0228\n\005Multi\022\026.hbase.pb.MultiReques" + + "t\032\027.hbase.pb.MultiResponseBB\n*org.apache" + + ".hadoop.hbase.protobuf.generatedB\014Client" + + "ProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -40432,21 +41252,27 @@ public final class ClientProtos { internal_static_hbase_pb_Scan_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_Scan_descriptor, - new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", "IncludeStartRow", "IncludeStopRow", "ReadType", }); + new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", "IncludeStartRow", "IncludeStopRow", "ReadType", "NeedCursorResult", }); internal_static_hbase_pb_ScanRequest_descriptor = getDescriptor().getMessageTypes().get(12); internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ScanRequest_descriptor, new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", "ClientHandlesPartials", "ClientHandlesHeartbeats", "TrackScanMetrics", "Renew", "LimitOfRows", }); - internal_static_hbase_pb_ScanResponse_descriptor = + internal_static_hbase_pb_Cursor_descriptor = getDescriptor().getMessageTypes().get(13); + internal_static_hbase_pb_Cursor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_Cursor_descriptor, + new java.lang.String[] { "Row", }); + internal_static_hbase_pb_ScanResponse_descriptor = + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_ScanResponse_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ScanResponse_descriptor, - new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", "PartialFlagPerResult", "MoreResultsInRegion", "HeartbeatMessage", "ScanMetrics", "MvccReadPoint", }); + new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", "PartialFlagPerResult", "MoreResultsInRegion", "HeartbeatMessage", "ScanMetrics", "MvccReadPoint", "Cursor", }); internal_static_hbase_pb_BulkLoadHFileRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_BulkLoadHFileRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BulkLoadHFileRequest_descriptor, @@ -40458,109 +41284,109 @@ public final class ClientProtos { internal_static_hbase_pb_BulkLoadHFileRequest_FamilyPath_descriptor, new java.lang.String[] { "Family", "Path", }); internal_static_hbase_pb_BulkLoadHFileResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_BulkLoadHFileResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BulkLoadHFileResponse_descriptor, new java.lang.String[] { "Loaded", }); internal_static_hbase_pb_DelegationToken_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(17); internal_static_hbase_pb_DelegationToken_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DelegationToken_descriptor, new java.lang.String[] { "Identifier", "Password", "Kind", "Service", }); internal_static_hbase_pb_PrepareBulkLoadRequest_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_PrepareBulkLoadRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_PrepareBulkLoadRequest_descriptor, new java.lang.String[] { "TableName", "Region", }); internal_static_hbase_pb_PrepareBulkLoadResponse_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(19); internal_static_hbase_pb_PrepareBulkLoadResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_PrepareBulkLoadResponse_descriptor, new java.lang.String[] { "BulkToken", }); internal_static_hbase_pb_CleanupBulkLoadRequest_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_CleanupBulkLoadRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_CleanupBulkLoadRequest_descriptor, new java.lang.String[] { "BulkToken", "Region", }); internal_static_hbase_pb_CleanupBulkLoadResponse_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_CleanupBulkLoadResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_CleanupBulkLoadResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_CoprocessorServiceCall_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(22); 
internal_static_hbase_pb_CoprocessorServiceCall_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_CoprocessorServiceCall_descriptor, new java.lang.String[] { "Row", "ServiceName", "MethodName", "Request", }); internal_static_hbase_pb_CoprocessorServiceResult_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_CoprocessorServiceResult_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_CoprocessorServiceResult_descriptor, new java.lang.String[] { "Value", }); internal_static_hbase_pb_CoprocessorServiceRequest_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_CoprocessorServiceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_CoprocessorServiceRequest_descriptor, new java.lang.String[] { "Region", "Call", }); internal_static_hbase_pb_CoprocessorServiceResponse_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(25); internal_static_hbase_pb_CoprocessorServiceResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_CoprocessorServiceResponse_descriptor, new java.lang.String[] { "Region", "Value", }); internal_static_hbase_pb_Action_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(26); internal_static_hbase_pb_Action_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_Action_descriptor, new java.lang.String[] { "Index", "Mutation", "Get", "ServiceCall", }); internal_static_hbase_pb_RegionAction_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(27); internal_static_hbase_pb_RegionAction_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionAction_descriptor, new java.lang.String[] { "Region", "Atomic", "Action", }); internal_static_hbase_pb_RegionLoadStats_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(28); internal_static_hbase_pb_RegionLoadStats_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionLoadStats_descriptor, new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", "CompactionPressure", }); internal_static_hbase_pb_MultiRegionLoadStats_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(29); internal_static_hbase_pb_MultiRegionLoadStats_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MultiRegionLoadStats_descriptor, new java.lang.String[] { "Region", "Stat", }); internal_static_hbase_pb_ResultOrException_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(30); internal_static_hbase_pb_ResultOrException_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ResultOrException_descriptor, new java.lang.String[] { "Index", "Result", "Exception", "ServiceResult", "LoadStats", }); internal_static_hbase_pb_RegionActionResult_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(31); 
    internal_static_hbase_pb_RegionActionResult_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessage.FieldAccessorTable(
        internal_static_hbase_pb_RegionActionResult_descriptor,
        new java.lang.String[] { "ResultOrException", "Exception", });
    internal_static_hbase_pb_MultiRequest_descriptor =
-      getDescriptor().getMessageTypes().get(31);
+      getDescriptor().getMessageTypes().get(32);
    internal_static_hbase_pb_MultiRequest_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessage.FieldAccessorTable(
        internal_static_hbase_pb_MultiRequest_descriptor,
        new java.lang.String[] { "RegionAction", "NonceGroup", "Condition", });
    internal_static_hbase_pb_MultiResponse_descriptor =
-      getDescriptor().getMessageTypes().get(32);
+      getDescriptor().getMessageTypes().get(33);
    internal_static_hbase_pb_MultiResponse_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessage.FieldAccessorTable(
        internal_static_hbase_pb_MultiResponse_descriptor,
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index bbb2c455e48..725a2b5e6d1 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -259,13 +259,13 @@ message Scan {
   optional uint64 mvcc_read_point = 20 [default = 0];
   optional bool include_start_row = 21 [default = true];
   optional bool include_stop_row = 22 [default = false];
-
   enum ReadType {
     DEFAULT = 0;
     STREAM = 1;
     PREAD = 2;
   }
   optional ReadType readType = 23 [default = DEFAULT];
+  optional bool need_cursor_result = 24 [default = false];
 }
 
 /**
@@ -294,6 +294,14 @@ message ScanRequest {
   optional uint32 limit_of_rows = 11 [default = 0];
 }
 
+/**
+ * Scan cursor to tell the client where
+ * the server is currently scanning.
+ */
+message Cursor {
+  optional bytes row = 1;
+}
+
 /**
 * The scan response. If there are no more results, more_results will
 * be false.  If it is not specified, it means there are more.
@@ -346,6 +354,10 @@ message ScanResponse {
   // make use of this mvcc_read_point when restarting a scanner to get a consistent view
   // of a row.
   optional uint64 mvcc_read_point = 11 [default = 0];
+
+  // If the Scan needs a cursor, return the row key the server is scanning in the heartbeat
+  // message. If the Scan does not need a cursor, this field is left unset to reduce network IO.
+  optional Cursor cursor = 12;
 }
 
 /**
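Note on usage: with need_cursor_result set on the Scan, a heartbeat ScanResponse can carry a Cursor instead of rows, and the client surfaces it as a special cursor Result. A minimal client-side sketch follows; it assumes an already-open Connection conn, a table named "t1", and a caller-supplied process(Result) hook, none of which are part of this patch:

    Scan scan = new Scan();
    scan.setNeedCursorResult(true);
    try (Table table = conn.getTable(TableName.valueOf("t1"));
         ResultScanner scanner = table.getScanner(scan)) {
      Result r;
      while ((r = scanner.next()) != null) {
        if (r.isCursor()) {
          // Heartbeat result: no cells, only the row key the server is currently scanning.
          System.out.println("Scanning near " + Bytes.toStringBinary(r.getCursor().getRow()));
          continue;
        }
        process(r); // an ordinary Result carrying cells
      }
    }

With this in place, next() keeps returning within the heartbeat interval even while a filter discards everything, so a caller can report progress instead of blocking until the client-side scanner timeout fires.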
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 2bae0449c3d..1d049442506 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -380,14 +380,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     private final RpcCallback closeCallBack;
     private final RpcCallback shippedCallback;
     private byte[] rowOfLastPartialResult;
+    private boolean needCursor;
 
     public RegionScannerHolder(String scannerName, RegionScanner s, Region r,
-        RpcCallback closeCallBack, RpcCallback shippedCallback) {
+        RpcCallback closeCallBack, RpcCallback shippedCallback, boolean needCursor) {
       this.scannerName = scannerName;
       this.s = s;
       this.r = r;
       this.closeCallBack = closeCallBack;
       this.shippedCallback = shippedCallback;
+      this.needCursor = needCursor;
     }
 
     public long getNextCallSeq() {
@@ -1295,8 +1297,8 @@
     return lastBlock;
   }
 
-  private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Region r)
-      throws LeaseStillHeldException {
+  private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Region r,
+      boolean needCursor) throws LeaseStillHeldException {
    Lease lease = regionServer.leases.createLease(scannerName, this.scannerLeaseTimeoutPeriod,
        new ScannerListener(scannerName));
    RpcCallback shippedCallback = new RegionScannerShippedCallBack(scannerName, s, lease);
@@ -1307,7 +1309,7 @@
      closeCallback = new RegionScannerCloseCallBack(s);
    }
    RegionScannerHolder rsh =
-        new RegionScannerHolder(scannerName, s, r, closeCallback, shippedCallback);
+        new RegionScannerHolder(scannerName, s, r, closeCallback, shippedCallback, needCursor);
    RegionScannerHolder existing = scanners.putIfAbsent(scannerName, rsh);
    assert existing == null : "scannerId must be unique within regionserver's whole lifecycle!";
    return rsh;
@@ -2857,7 +2859,7 @@
       builder.setMvccReadPoint(scanner.getMvccReadPoint());
       builder.setTtl(scannerLeaseTimeoutPeriod);
       String scannerName = String.valueOf(scannerId);
-      return addScanner(scannerName, scanner, region);
+      return addScanner(scannerName, scanner, region, scan.isNeedCursorResult());
     }
 
   private void checkScanNextCallSeq(ScanRequest request, RegionScannerHolder rsh)
@@ -3054,6 +3056,12 @@
           if (moreRows) {
             // Heartbeat messages occur when the time limit has been reached.
             builder.setHeartbeatMessage(timeLimitReached);
+            if (timeLimitReached && rsh.needCursor) {
+              Cell readingCell = scannerContext.getPeekedCellInHeartbeat();
+              if (readingCell != null) {
+                builder.setCursor(ProtobufUtil.toCursor(readingCell));
+              }
+            }
           }
           break;
         }
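The ProtobufUtil.toCursor(Cell) helper called above is added by this patch's ProtobufUtil changes, which are not reproduced in this excerpt. A plausible sketch of its shape, for orientation only (the actual implementation in the patch may copy the row bytes differently):

    public static ClientProtos.Cursor toCursor(Cell cell) {
      // Only the row portion of the peeked cell goes into the Cursor message.
      return ClientProtos.Cursor.newBuilder()
          .setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()))
          .build();
    }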
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 19c106bbaa8..2bab82e51cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -94,6 +95,8 @@ public class ScannerContext {
   boolean keepProgress;
   private static boolean DEFAULT_KEEP_PROGRESS = false;
 
+  private Cell peekedCellInHeartbeat = null;
+
   /**
    * Tracks the relevant server side metrics during scans. null when metrics should not be tracked
    */
@@ -328,6 +331,14 @@
         || checkTimeLimit(checkerScope);
   }
 
+  public Cell getPeekedCellInHeartbeat() {
+    return peekedCellInHeartbeat;
+  }
+
+  public void setPeekedCellInHeartbeat(Cell peekedCellInHeartbeat) {
+    this.peekedCellInHeartbeat = peekedCellInHeartbeat;
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index b063060b7fb..4593a4dddbe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -585,6 +585,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
           if ((kvsScanned % cellsPerHeartbeatCheck == 0)) {
             scannerContext.updateTimeProgress();
             if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) {
+              scannerContext.setPeekedCellInHeartbeat(prevCell);
              return scannerContext.setScannerState(NextState.TIME_LIMIT_REACHED).hasMoreValues();
             }
           }
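A cursor is only as fresh as the last heartbeat check: StoreScanner consults the time limit once every cellsPerHeartbeatCheck cells, so with the default setting a slow filter can still chew through many cells between cursor updates. The test below pins the check interval to one cell so every cell can trigger a heartbeat; a sketch of the same knob, assuming an HBaseTestingUtility named util:

    Configuration conf = util.getConfiguration();
    // Re-check the time limit after every cell so heartbeats (and cursors) fire promptly.
    // The production default is much larger, trading cursor freshness for lower overhead.
    conf.setLong(StoreScanner.HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK, 1);
    util.startMiniCluster(1);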
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerCursor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerCursor.java
new file mode 100644
index 00000000000..e40b808f194
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerCursor.java
@@ -0,0 +1,191 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTestConst;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterBase;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestScannerCursor {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestScannerCursor.class);
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private static Table TABLE = null;
+
+  /**
+   * Table configuration
+   */
+  private static TableName TABLE_NAME = TableName.valueOf("TestScannerCursor");
+
+  private static int NUM_ROWS = 5;
+  private static byte[] ROW = Bytes.toBytes("testRow");
+  private static byte[][] ROWS = HTestConst.makeNAscii(ROW, NUM_ROWS);
+
+  private static int NUM_FAMILIES = 2;
+  private static byte[] FAMILY = Bytes.toBytes("testFamily");
+  private static byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, NUM_FAMILIES);
+
+  private static int NUM_QUALIFIERS = 2;
+  private static byte[] QUALIFIER = Bytes.toBytes("testQualifier");
+  private static byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, NUM_QUALIFIERS);
+
+  private static int VALUE_SIZE = 10;
+  private static byte[] VALUE = Bytes.createMaxByteArray(VALUE_SIZE);
+
+  private static final int TIMEOUT = 4000;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+
+    conf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, TIMEOUT);
+    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, TIMEOUT);
+
+    // Check the timeout condition after every cell
+    conf.setLong(StoreScanner.HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK, 1);
+    TEST_UTIL.startMiniCluster(1);
+
+    TABLE = createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE);
+
+  }
+
+  static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
+      byte[][] qualifiers, byte[] cellValue) throws IOException {
+    Table ht = TEST_UTIL.createTable(name, families);
+    List<Put> puts = createPuts(rows, families, qualifiers, cellValue);
+    ht.put(puts);
+    return ht;
+  }
+
+  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
+      byte[] value) throws IOException {
+    Put put;
+    ArrayList<Put> puts = new ArrayList<>();
+
+    for (int row = 0; row < rows.length; row++) {
+      put = new Put(rows[row]);
+      for (int fam = 0; fam < families.length; fam++) {
+        for (int qual = 0; qual < qualifiers.length; qual++) {
+          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
+          put.add(kv);
+        }
+      }
+      puts.add(put);
+    }
+
+    return puts;
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  public static class SparseFilter extends FilterBase {
+
+    @Override
+    public ReturnCode filterKeyValue(Cell v) throws IOException {
+      Threads.sleep(TIMEOUT / 2 + 100);
+      return Bytes.equals(CellUtil.cloneRow(v), ROWS[NUM_ROWS - 1]) ? ReturnCode.INCLUDE
+          : ReturnCode.SKIP;
+    }
+
+    public static Filter parseFrom(final byte[] pbBytes) {
+      return new SparseFilter();
+    }
+  }
+
+  @Test
+  public void testHeartbeatWithSparseFilter() throws Exception {
+    Scan scan = new Scan();
+    scan.setMaxResultSize(Long.MAX_VALUE);
+    scan.setCaching(Integer.MAX_VALUE);
+    scan.setNeedCursorResult(true);
+    scan.setAllowPartialResults(true);
+    scan.setFilter(new SparseFilter());
+    try (ResultScanner scanner = TABLE.getScanner(scan)) {
+      int num = 0;
+      Result r;
+      while ((r = scanner.next()) != null) {
+
+        if (num < (NUM_ROWS - 1) * NUM_FAMILIES * NUM_QUALIFIERS) {
+          Assert.assertTrue(r.isCursor());
+          Assert.assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], r.getCursor().getRow());
+        } else {
+          Assert.assertFalse(r.isCursor());
+          Assert.assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], r.getRow());
+        }
+        num++;
+      }
+    }
+  }
+
+  @Test
+  public void testSizeLimit() throws IOException {
+    Scan scan = new Scan();
+    scan.setMaxResultSize(1);
+    scan.setCaching(Integer.MAX_VALUE);
+    scan.setNeedCursorResult(true);
+    try (ResultScanner scanner = TABLE.getScanner(scan)) {
+      int num = 0;
+      Result r;
+      while ((r = scanner.next()) != null) {
+
+        if (num % (NUM_FAMILIES * NUM_QUALIFIERS) != (NUM_FAMILIES * NUM_QUALIFIERS) - 1) {
+          Assert.assertTrue(r.isCursor());
+          Assert.assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], r.getCursor().getRow());
+        } else {
+          Assert.assertFalse(r.isCursor());
+          Assert.assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], r.getRow());
+        }
+        num++;
+      }
+    }
+  }
+
+}
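As a worked example of the testSizeLimit arithmetic: each row holds NUM_FAMILIES * NUM_QUALIFIERS = 4 cells, and setMaxResultSize(1) makes the server hit the size limit after every cell, so the sequence of next() calls looks roughly like this:

    // num = 0, 1, 2 -> size limit hit mid-row -> cursor Results pointing at ROWS[0]
    // num = 3       -> the 4 cells of ROWS[0] are complete -> a real Result for ROWS[0]
    // num = 4, 5, 6 -> cursor Results pointing at ROWS[1], and so on through ROWS[4]

which is exactly what the modulo check num % 4 != 3 asserts: every next() call that does not complete a row returns a cursor rather than blocking.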