diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index f9e44317305..209ec702b03 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -268,10 +268,6 @@
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-metrics</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-protocol</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol-shaded</artifactId>
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index ff0e8fbd4ba..f3845795f53 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -95,10 +95,6 @@
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol-shaded</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-protocol</artifactId>
-    </dependency>
     <dependency>
       <groupId>com.github.stephenc.findbugs</groupId>
       <artifactId>findbugs-annotations</artifactId>
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 2234a2a48ab..f9b6d7b826e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1700,19 +1700,26 @@ public interface Admin extends Abortable, Closeable {
   List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException;
 
   /**
-   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the active
-   * master.
-   * <p>
-   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access
-   * a published coprocessor {@link com.google.protobuf.Service} using standard protobuf service
-   * invocations:
-   * </p>
-   * <blockquote><pre>
+   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
+   * instance connected to the active master.
+   * <p/>
+   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
+   * used to access a published coprocessor
+   * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service
+   * invocations:
+   * <p/>
+   * <blockquote>
+   *
+   * <pre>
    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    * MyCallRequest request = MyCallRequest.newBuilder()
    *     ...
    *     .build();
    * MyCallResponse response = service.myCall(null, request);
-   * </pre></blockquote>
-   *
+   * </pre>
+   *
+   * </blockquote>
    * @return A MasterCoprocessorRpcChannel instance
    * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any
    *             more. Use the coprocessorService methods in {@link AsyncAdmin} instead.
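Aside on usage: the @deprecated note above points callers at AsyncAdmin, and both deprecated methods on this interface share the same replacement. A minimal sketch of the equivalent calls through that API, assuming an open AsyncConnection and reusing the hypothetical MyService/MyCallRequest/MyCallResponse generated types from the Javadoc example:

    // Sketch only: MyService and friends are the hypothetical generated protobuf
    // stubs from the Javadoc example; asyncConnection is an open AsyncConnection.
    AsyncAdmin admin = asyncConnection.getAdmin();
    MyCallRequest request = MyCallRequest.newBuilder().build();
    // Master endpoint: the first argument wraps the shaded RpcChannel in a stub,
    // the second lambda issues the call against that stub.
    CompletableFuture<MyCallResponse> onMaster = admin.coprocessorService(
        MyService::newStub,
        (stub, controller, done) -> stub.myCall(controller, request, done));
    // Region server endpoint: same shape, with an explicit target ServerName.
    CompletableFuture<MyCallResponse> onRegionServer = admin.coprocessorService(
        MyService::newStub,
        (stub, controller, done) -> stub.myCall(controller, request, done),
        serverName);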
@@ -1722,24 +1729,25 @@ public interface Admin extends Abortable, Closeable {
 
   /**
-   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
-   * connected to the passed region server.
-   *
-   * <p>
-   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
-   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
-   * </p>
-   *
-   * <blockquote><pre>
+   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
+   * instance connected to the passed region server.
+   * <p/>
+   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
+   * used to access a published coprocessor
+   * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service
+   * invocations:
+   * <p/>
+   * <blockquote>
+   *
+   * <pre>
    * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    * MyCallRequest request = MyCallRequest.newBuilder()
    *     ...
    *     .build();
    * MyCallResponse response = service.myCall(null, request);
-   * </pre></blockquote>
-   *
+   * </pre>
+   *
+   * </blockquote>
    * @param serverName the server name to which the endpoint call is made
    * @return A RegionServerCoprocessorRpcChannel instance
    * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 9a7da456371..870d83d9149 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -509,18 +509,18 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the table
-   * region containing the specified row. The row given does not actually have to exist. Whichever
-   * region would contain the row based on start and end keys will be used. Note that the
-   * {@code row} parameter is also not passed to the coprocessor handler registered for this
-   * protocol, unless the {@code row} is separately passed as an argument in the service request.
-   * The parameter here is only used to locate the region used to handle the call.
-   * <p>
-   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
-   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
-   * </p>
+   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
+   * instance connected to the table region containing the specified row. The row given does not
+   * actually have to exist. Whichever region would contain the row based on start and end keys will
+   * be used. Note that the {@code row} parameter is also not passed to the coprocessor handler
+   * registered for this protocol, unless the {@code row} is separately passed as an argument in the
+   * service request. The parameter here is only used to locate the region used to handle the call.
+   * <p/>
+   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
+   * used to access a published coprocessor {@link Service} using standard protobuf service
+   * invocations:
+   * <p/>
    * <blockquote>
    *
    * <pre>
    * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
@@ -529,8 +529,8 @@ public interface Table extends Closeable {
    *     .build();
    * MyCallResponse response = service.myCall(null, request);
    * </pre>
-   *
-   * </blockquote></div>
+   *
+   * </blockquote>
    * @param row The row key used to identify the remote region location
    * @return A CoprocessorRpcChannel instance
    * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any
@@ -543,10 +543,10 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
-   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
-   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
-   * with each {@link com.google.protobuf.Service} instance.
+   * Creates an instance of the given {@link Service} subclass for each table region spanning the
+   * range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
+   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
+   * {@link Service} instance.
    * @param service the protocol buffer {@code Service} implementation to call
    * @param startKey start region selection with region containing this row. If {@code null}, the
    *          selection will start with the first table region.
@@ -554,9 +554,9 @@ public interface Table extends Closeable {
    *          {@code null}, selection will continue through the last table region.
    * @param callable this instance's
    *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be
-   *          invoked once per table region, using the {@link com.google.protobuf.Service} instance
-   *          connected to that region.
-   * @param <T> the {@link com.google.protobuf.Service} subclass to connect to
+   *          invoked once per table region, using the {@link Service} instance connected to that
+   *          region.
+   * @param <T> the {@link Service} subclass to connect to
    * @param <R> Return type for the {@code callable} parameter's
    *          {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
    * @return a map of result values keyed by region name
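For the range variant above, a minimal usage sketch (the hypothetical MyService/MyCallRequest/MyCallResponse types again; CoprocessorRpcUtils.BlockingRpcCallback is the blocking helper from org.apache.hadoop.hbase.ipc, and the surrounding code still has to handle the Throwable this method declares):

    // Sketch: invoke the hypothetical MyService endpoint on every region in
    // [startKey, endKey]; results come back as one entry per region.
    Map<byte[], Long> results = table.coprocessorService(MyService.class, startKey, endKey,
        new Batch.Call<MyService, Long>() {
          @Override
          public Long call(MyService instance) throws IOException {
            CoprocessorRpcUtils.BlockingRpcCallback<MyCallResponse> done =
                new CoprocessorRpcUtils.BlockingRpcCallback<>();
            instance.myCall(null, MyCallRequest.newBuilder().build(), done);
            return done.get().getCount(); // getCount() is a hypothetical response field
          }
        });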
@@ -585,16 +585,15 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
-   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
-   * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
-   * with each {@link Service} instance.
-   * <p>
+   * Creates an instance of the given {@link Service} subclass for each table region spanning the
+   * range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
+   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
+   * {@link Service} instance.
+   * <p>
    * The given
    * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
    * method will be called with the return value from each region's
    * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
-   * <p>
    * @param service the protocol buffer {@code Service} implementation to call
    * @param startKey start region selection with region containing this row. If {@code null}, the
    *          selection will start with the first table region.
@@ -622,10 +621,10 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
-   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
-   * the invocations to the same region server will be batched into one call. The coprocessor
-   * service is invoked according to the service instance, method name and parameters.
+   * Creates an instance of the given {@link Service} subclass for each table region spanning the
+   * range from the {@code startKey} row to {@code endKey} row (inclusive), all the invocations to
+   * the same region server will be batched into one call. The coprocessor service is invoked
+   * according to the service instance, method name and parameters.
    * @param methodDescriptor the descriptor for the protobuf service method to call.
    * @param request the method call parameters
    * @param startKey start region selection with region containing this row. If {@code null}, the
@@ -661,15 +660,14 @@ public interface Table extends Closeable {
   }
 
   /**
-   * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
-   * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
-   * the invocations to the same region server will be batched into one call. The coprocessor
-   * service is invoked according to the service instance, method name and parameters.
-   * <p>
+   * Creates an instance of the given {@link Service} subclass for each table region spanning the
+   * range from the {@code startKey} row to {@code endKey} row (inclusive), all the invocations to
+   * the same region server will be batched into one call. The coprocessor service is invoked
+   * according to the service instance, method name and parameters.
+   * <p>
    * The given
    * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
    * method will be called with the return value from each region's invocation.
-   * <p>
    * @param methodDescriptor the descriptor for the protobuf service method to call.
    * @param request the method call parameters
    * @param startKey start region selection with region containing this row. If {@code null}, the
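And a sketch of the callback flavour covered by the last two hunks, which streams per-region results instead of collecting a Map (myCall is the Batch.Call from the previous sketch; the AtomicLong stands in for whatever aggregation the caller actually needs):

    // Sketch: consume each region's result as it arrives via Batch.Callback.
    AtomicLong total = new AtomicLong();
    table.coprocessorService(MyService.class, startKey, endKey, myCall,
        new Batch.Callback<Long>() {
          @Override
          public void update(byte[] region, byte[] row, Long result) {
            total.addAndGet(result); // invoked once per region in the selected range
          }
        });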
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
index c3defda5b4d..2c1647f8542 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
@@ -29,44 +29,37 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public abstract class Batch {
+
   /**
    * Defines a unit of work to be executed.
-   *
-   * <p>
+   * <p>
    * When used with
-   * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
-   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
-   * the implementations {@link Batch.Call#call(Object)} method will be invoked
-   * with a proxy to each region's coprocessor {@link com.google.protobuf.Service} implementation.
-   * </p>
+   * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], Batch.Call)}
+   * the implementations {@link Batch.Call#call(Object)} method will be invoked with a proxy to each
+   * region's coprocessor {@link org.apache.hbase.thirdparty.com.google.protobuf.Service}
+   * implementation.
    * @see org.apache.hadoop.hbase.client.coprocessor.Batch
    * @see org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])
-   * @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
-   *      org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
-   * @param <T> the instance type to be passed to
-   *            {@link Batch.Call#call(Object)}
+   * @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], Batch.Call)
+   * @param <T> the instance type to be passed to {@link Batch.Call#call(Object)}
    * @param <R> the return type from {@link Batch.Call#call(Object)}
    */
   @InterfaceAudience.Public
-  public interface Call<T,R> {
+  public interface Call<T, R> {
     R call(T instance) throws IOException;
   }
 
   /**
-   * Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)}
-   * result.
-   *
-   * <p>
+   * Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)} result.
+   * <p>
    * When used with
-   * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
-   * org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
-   * the implementation's {@link Batch.Callback#update(byte[], byte[], Object)}
-   * method will be called with the {@link Batch.Call#call(Object)} return value
-   * from each region in the selected range.
-   * </p>
+   * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], Batch.Call)}
+   * the implementation's {@link Batch.Callback#update(byte[], byte[], Object)} method will be
+   * called with the {@link Batch.Call#call(Object)} return value from each region in the selected
+   * range.
    * @param <R> the return type from the associated {@link Batch.Call#call(Object)}
    * @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
-   *      org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
+   *        org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
    */
   @InterfaceAudience.Public
   public interface Callback<R> {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
index 3713926c3ee..c91d3f6e621 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
@@ -24,7 +24,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel;
 /**
  * Base interface which provides clients with an RPC connection to call coprocessor endpoint
- * {@link com.google.protobuf.Service}s.
+ * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service}s.
  * <p>
  * Note that clients should not use this class directly, except through
  * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}.
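The modern, row-addressed counterpart of this channel lives on AsyncTable; a sketch under the same hypothetical MyService types, assuming an open AsyncConnection:

    // Sketch: row-addressed endpoint call through AsyncTable; the row only picks
    // the region, exactly as described for Table.coprocessorService(byte[]).
    AsyncTable<AdvancedScanResultConsumer> asyncTable =
        asyncConnection.getTable(TableName.valueOf("mytable"));
    CompletableFuture<MyCallResponse> future = asyncTable.coprocessorService(
        MyService::newStub,
        (stub, controller, done) ->
            stub.myCall(controller, MyCallRequest.newBuilder().build(), done),
        rowKey);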
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
index 10d7d8c6605..745009bc080 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
@@ -26,14 +26,13 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 
 /**
- * Used for server-side protobuf RPC service invocations. This handler allows
- * invocation exceptions to easily be passed through to the RPC server from coprocessor
- * {@link com.google.protobuf.Service} implementations.
- *
- * <p>
- * When implementing {@link com.google.protobuf.Service} defined methods,
- * coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client:
- *
+ * Used for server-side protobuf RPC service invocations. This handler allows invocation exceptions
+ * to easily be passed through to the RPC server from coprocessor
+ * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} implementations.
+ * <p>
+ * When implementing {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} defined
+ * methods, coprocessor endpoints can use the following pattern to pass exceptions back to the RPC
+ * client:
  * <pre>
  * public void myMethod(RpcController controller, MyRequest request,
  *     RpcCallback&lt;MyResponse&gt; done) {
  *   MyResponse response = null;
@@ -47,7 +46,6 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
  *   done.run(response);
  * }
  * </pre>
- *
  */
 @InterfaceAudience.Private
 public class ServerRpcController implements RpcController {
@@ -98,7 +96,8 @@ public class ServerRpcController implements RpcController {
   }
 
   /**
-   * Sets an exception to be communicated back to the {@link com.google.protobuf.Service} client.
+   * Sets an exception to be communicated back to the
+   * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} client.
    * @param ioe the exception encountered during execution of the service method
    */
   public void setFailedOn(IOException ioe) {
@@ -108,9 +107,9 @@ public class ServerRpcController implements RpcController {
 
   /**
    * Returns any exception thrown during service method invocation, or {@code null} if no exception
-   * was thrown. This can be used by clients to receive exceptions generated by RPC calls, even
-   * when {@link RpcCallback}s are used and no {@link com.google.protobuf.ServiceException} is
-   * declared.
+   * was thrown. This can be used by clients to receive exceptions generated by RPC calls, even when
+   * {@link RpcCallback}s are used and no
+   * {@link org.apache.hbase.thirdparty.com.google.protobuf.ServiceException} is declared.
    */
   public IOException getFailedOn() {
     return serviceException;
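The client side of that pattern, sketched: when an endpoint reports a failure through setFailedOn(), the caller can surface it after a callback-style invocation (stub, MyRequest and MyResponse are the hypothetical types from the pattern above):

    // Sketch: retrieve a server-side exception after a callback-style call.
    ServerRpcController controller = new ServerRpcController();
    CoprocessorRpcUtils.BlockingRpcCallback<MyResponse> done =
        new CoprocessorRpcUtils.BlockingRpcCallback<>();
    stub.myMethod(controller, request, done);
    MyResponse response = done.get();
    if (controller.failedOnException()) {
      throw controller.getFailedOn(); // the IOException set via setFailedOn() on the server
    }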
- */ -package org.apache.hadoop.hbase.protobuf; - -import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC; - -import com.google.protobuf.ByteString; -import com.google.protobuf.CodedInputStream; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.Message; -import com.google.protobuf.Parser; -import com.google.protobuf.RpcChannel; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; -import com.google.protobuf.TextFormat; -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Objects; -import java.util.function.Function; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.Cell.Type; -import org.apache.hadoop.hbase.CellBuilderType; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.ExtendedCellBuilder; -import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Consistency; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.SnapshotType; -import org.apache.hadoop.hbase.client.metrics.ScanMetrics; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.filter.ByteArrayComparable; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; -import org.apache.hadoop.hbase.protobuf.generated.CellProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; -import 
org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; -import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; -import org.apache.hadoop.hbase.util.Addressing; -import org.apache.hadoop.hbase.util.ByteStringer; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.DynamicClassLoader; -import org.apache.hadoop.hbase.util.ExceptionUtil; -import org.apache.hadoop.hbase.util.Methods; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.yetus.audience.InterfaceAudience; - -/** - * Protobufs utility. - * NOTE: This class OVERLAPS ProtobufUtil in the subpackage 'shaded'. The latter is used - * internally and has more methods. This Class is for Coprocessor Endpoints only though they - * should not be using this private class. It should not be depended upon. Most methods here - * are COPIED from the shaded ProtobufUtils with only difference being they refer to non-shaded - * protobufs. - * @see ProtobufUtil - */ -// TODO: Generate this class from the shaded version. -@InterfaceAudience.Private // TODO: some clients (Hive, etc) use this class. -public final class ProtobufUtil { - private ProtobufUtil() { - } - - /** - * Many results are simple: no cell, exists true or false. To save on object creations, - * we reuse them across calls. - */ - // TODO: PICK THESE UP FROM THE SHADED PROTOBUF. 
- private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{}; - private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); - final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); - final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); - private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true); - private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE - = Result.create((Cell[])null, true, true); - private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE - = Result.create((Cell[])null, false, true); - - private final static ClientProtos.Result EMPTY_RESULT_PB; - private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE; - private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE; - private final static ClientProtos.Result EMPTY_RESULT_PB_STALE; - private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE_STALE; - private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE_STALE; - - - static { - ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); - - builder.setExists(true); - builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); - - builder.setStale(true); - EMPTY_RESULT_PB_EXISTS_TRUE_STALE = builder.build(); - builder.clear(); - - builder.setExists(false); - builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); - builder.setStale(true); - EMPTY_RESULT_PB_EXISTS_FALSE_STALE = builder.build(); - - builder.clear(); - builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB = builder.build(); - builder.setStale(true); - EMPTY_RESULT_PB_STALE = builder.build(); - } - - /** - * Dynamic class loader to load filter/comparators - */ - private final static class ClassLoaderHolder { - private final static ClassLoader CLASS_LOADER; - - static { - ClassLoader parent = ProtobufUtil.class.getClassLoader(); - Configuration conf = HBaseConfiguration.create(); - CLASS_LOADER = AccessController.doPrivileged((PrivilegedAction) - () -> new DynamicClassLoader(conf, parent) - ); - } - } - - /** - * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, - * to flag what follows as a protobuf in hbase. Prepend these bytes to all content written to - * znodes, etc. - * @param bytes Bytes to decorate - * @return The passed bytes with magic prepended (Creates a new - * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. - */ - public static byte [] prependPBMagic(final byte [] bytes) { - return Bytes.add(PB_MAGIC, bytes); - } - - /** - * @param bytes Bytes to check. - * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. - */ - public static boolean isPBMagicPrefix(final byte [] bytes) { - return ProtobufMagic.isPBMagicPrefix(bytes); - } - - /** - * @param bytes Bytes to check. - * @param offset offset to start at - * @param len length to use - * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. - */ - public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) { - return ProtobufMagic.isPBMagicPrefix(bytes, offset, len); - } - - /** - * @param bytes bytes to check - * @throws DeserializationException if we are missing the pb magic prefix - */ - public static void expectPBMagicPrefix(final byte[] bytes) throws DeserializationException { - if (!isPBMagicPrefix(bytes)) { - String bytesPrefix = bytes == null ? 
"null" : Bytes.toStringBinary(bytes, 0, PB_MAGIC.length); - throw new DeserializationException( - "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " + bytesPrefix); - } - } - - /** - * @return Length of {@link ProtobufMagic#lengthOfPBMagic()} - */ - public static int lengthOfPBMagic() { - return ProtobufMagic.lengthOfPBMagic(); - } - - /** - * Return the IOException thrown by the remote server wrapped in - * ServiceException as cause. - * - * @param se ServiceException that wraps IO exception thrown by the server - * @return Exception wrapped in ServiceException or - * a new IOException that wraps the unexpected ServiceException. - */ - public static IOException getRemoteException(ServiceException se) { - return makeIOExceptionOfException(se); - } - - /** - * Return the Exception thrown by the remote server wrapped in - * ServiceException as cause. RemoteException are left untouched. - * - * @param e ServiceException that wraps IO exception thrown by the server - * @return Exception wrapped in ServiceException. - */ - public static IOException getServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) { - Throwable t = e.getCause(); - if (ExceptionUtil.isInterrupt(t)) { - return ExceptionUtil.asInterrupt(t); - } - return t instanceof IOException ? (IOException) t : new HBaseIOException(t); - } - - /** - * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than - * just {@link ServiceException}. Prefer this method to - * {@link #getRemoteException(ServiceException)} because trying to - * contain direct protobuf references. - */ - public static IOException handleRemoteException(Throwable e) { - return makeIOExceptionOfException(e); - } - - private static IOException makeIOExceptionOfException(Throwable e) { - Throwable t = e; - if (e instanceof ServiceException || - e instanceof org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) { - t = e.getCause(); - } - if (ExceptionUtil.isInterrupt(t)) { - return ExceptionUtil.asInterrupt(t); - } - if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); - } - return t instanceof IOException? 
(IOException)t: new HBaseIOException(t); - } - - /** - * Convert a ServerName to a protocol buffer ServerName - * - * @param serverName the ServerName to convert - * @return the converted protocol buffer ServerName - * @see #toServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) - */ - public static HBaseProtos.ServerName - toServerName(final ServerName serverName) { - if (serverName == null) return null; - HBaseProtos.ServerName.Builder builder = - HBaseProtos.ServerName.newBuilder(); - builder.setHostName(serverName.getHostname()); - if (serverName.getPort() >= 0) { - builder.setPort(serverName.getPort()); - } - if (serverName.getStartcode() >= 0) { - builder.setStartCode(serverName.getStartcode()); - } - return builder.build(); - } - - /** - * Convert a protocol buffer ServerName to a ServerName - * - * @param proto the protocol buffer ServerName to convert - * @return the converted ServerName - */ - public static ServerName toServerName(final HBaseProtos.ServerName proto) { - if (proto == null) return null; - String hostName = proto.getHostName(); - long startCode = -1; - int port = -1; - if (proto.hasPort()) { - port = proto.getPort(); - } - if (proto.hasStartCode()) { - startCode = proto.getStartCode(); - } - return ServerName.valueOf(hostName, port, startCode); - } - - /** - * Convert a protobuf Durability into a client Durability - */ - public static Durability toDurability( - final ClientProtos.MutationProto.Durability proto) { - switch(proto) { - case USE_DEFAULT: - return Durability.USE_DEFAULT; - case SKIP_WAL: - return Durability.SKIP_WAL; - case ASYNC_WAL: - return Durability.ASYNC_WAL; - case SYNC_WAL: - return Durability.SYNC_WAL; - case FSYNC_WAL: - return Durability.FSYNC_WAL; - default: - return Durability.USE_DEFAULT; - } - } - - /** - * Convert a client Durability into a protbuf Durability - */ - public static ClientProtos.MutationProto.Durability toDurability( - final Durability d) { - switch(d) { - case USE_DEFAULT: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; - case SKIP_WAL: - return ClientProtos.MutationProto.Durability.SKIP_WAL; - case ASYNC_WAL: - return ClientProtos.MutationProto.Durability.ASYNC_WAL; - case SYNC_WAL: - return ClientProtos.MutationProto.Durability.SYNC_WAL; - case FSYNC_WAL: - return ClientProtos.MutationProto.Durability.FSYNC_WAL; - default: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; - } - } - - /** - * Convert a protocol buffer Get to a client Get - * - * @param proto the protocol buffer Get to convert - * @return the converted client Get - * @throws IOException - */ - public static Get toGet(final ClientProtos.Get proto) throws IOException { - if (proto == null) return null; - byte[] row = proto.getRow().toByteArray(); - Get get = new Get(row); - if (proto.hasCacheBlocks()) { - get.setCacheBlocks(proto.getCacheBlocks()); - } - if (proto.hasMaxVersions()) { - get.readVersions(proto.getMaxVersions()); - } - if (proto.hasStoreLimit()) { - get.setMaxResultsPerColumnFamily(proto.getStoreLimit()); - } - if (proto.hasStoreOffset()) { - get.setRowOffsetPerColumnFamily(proto.getStoreOffset()); - } - if (proto.getCfTimeRangeCount() > 0) { - for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { - TimeRange timeRange = protoToTimeRange(cftr.getTimeRange()); - get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); - } - } - if (proto.hasTimeRange()) { - TimeRange timeRange = protoToTimeRange(proto.getTimeRange()); - 
get.setTimeRange(timeRange.getMin(), timeRange.getMax()); - } - if (proto.hasFilter()) { - FilterProtos.Filter filter = proto.getFilter(); - get.setFilter(ProtobufUtil.toFilter(filter)); - } - for (NameBytesPair attribute: proto.getAttributeList()) { - get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); - } - if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { - byte[] family = column.getFamily().toByteArray(); - if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { - get.addColumn(family, qualifier.toByteArray()); - } - } else { - get.addFamily(family); - } - } - } - if (proto.hasExistenceOnly() && proto.getExistenceOnly()){ - get.setCheckExistenceOnly(true); - } - if (proto.hasConsistency()) { - get.setConsistency(toConsistency(proto.getConsistency())); - } - if (proto.hasLoadColumnFamiliesOnDemand()) { - get.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand()); - } - return get; - } - - public static Consistency toConsistency(ClientProtos.Consistency consistency) { - switch (consistency) { - case STRONG : return Consistency.STRONG; - case TIMELINE : return Consistency.TIMELINE; - default : return Consistency.STRONG; - } - } - - public static ClientProtos.Consistency toConsistency(Consistency consistency) { - switch (consistency) { - case STRONG : return ClientProtos.Consistency.STRONG; - case TIMELINE : return ClientProtos.Consistency.TIMELINE; - default : return ClientProtos.Consistency.STRONG; - } - } - - /** - * Convert a protocol buffer Mutate to a Put. - * - * @param proto The protocol buffer MutationProto to convert - * @return A client Put. - * @throws IOException - */ - public static Put toPut(final MutationProto proto) - throws IOException { - return toPut(proto, null); - } - - /** - * Convert a protocol buffer Mutate to a Put. - * - * @param proto The protocol buffer MutationProto to convert - * @param cellScanner If non-null, the Cell data that goes with this proto. - * @return A client Put. - * @throws IOException - */ - public static Put toPut(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? - MutationType type = proto.getMutateType(); - assert type == MutationType.PUT: type.name(); - long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP; - Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; - if (cellCount > 0) { - // The proto has metadata only and the data is separate to be found in the cellScanner. 
- if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); - } - for (int i = 0; i < cellCount; i++) { - if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); - } - Cell cell = cellScanner.current(); - if (put == null) { - put = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp); - } - put.add(cell); - } - } else { - if (put == null) { - throw new IllegalArgumentException("row cannot be null"); - } - // The proto has the metadata and the data itself - ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (ColumnValue column: proto.getColumnValueList()) { - byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { - if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); - } - long ts = timestamp; - if (qv.hasTimestamp()) { - ts = qv.getTimestamp(); - } - byte[] allTagsBytes; - if (qv.hasTags()) { - allTagsBytes = qv.getTags().toByteArray(); - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .setTags(allTagsBytes) - .build()); - } else { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Cell.Type.Put) - .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .setTags(allTagsBytes) - .build()); - } - } else { - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .build()); - } else{ - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) - .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Type.Put) - .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .build()); - } - } - } - } - } - put.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { - put.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); - } - return put; - } - - /** - * Convert a protocol buffer Mutate to a Delete - * - * @param proto the protocol buffer Mutate to convert - * @return the converted client Delete - * @throws IOException - */ - public static Delete toDelete(final MutationProto proto) - throws IOException { - return toDelete(proto, null); - } - - /** - * Convert a protocol buffer Mutate to a Delete - * - * @param proto the protocol buffer Mutate to convert - * @param cellScanner if non-null, the data that goes with this delete. - * @return the converted client Delete - * @throws IOException - */ - public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - MutationType type = proto.getMutateType(); - assert type == MutationType.DELETE : type.name(); - long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; - Delete delete = proto.hasRow() ? 
new Delete(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; - if (cellCount > 0) { - // The proto has metadata only and the data is separate to be found in the cellScanner. - if (cellScanner == null) { - // TextFormat should be fine for a Delete since it carries no data, just coordinates. - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - TextFormat.shortDebugString(proto)); - } - for (int i = 0; i < cellCount; i++) { - if (!cellScanner.advance()) { - // TextFormat should be fine for a Delete since it carries no data, just coordinates. - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + TextFormat.shortDebugString(proto)); - } - Cell cell = cellScanner.current(); - if (delete == null) { - delete = - new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp); - } - delete.add(cell); - } - } else { - if (delete == null) { - throw new IllegalArgumentException("row cannot be null"); - } - for (ColumnValue column: proto.getColumnValueList()) { - byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { - DeleteType deleteType = qv.getDeleteType(); - byte[] qualifier = null; - if (qv.hasQualifier()) { - qualifier = qv.getQualifier().toByteArray(); - } - long ts = HConstants.LATEST_TIMESTAMP; - if (qv.hasTimestamp()) { - ts = qv.getTimestamp(); - } - if (deleteType == DeleteType.DELETE_ONE_VERSION) { - delete.addColumn(family, qualifier, ts); - } else if (deleteType == DeleteType.DELETE_MULTIPLE_VERSIONS) { - delete.addColumns(family, qualifier, ts); - } else if (deleteType == DeleteType.DELETE_FAMILY_VERSION) { - delete.addFamilyVersion(family, ts); - } else { - delete.addFamily(family, ts); - } - } - } - } - delete.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { - delete.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); - } - return delete; - } - - @FunctionalInterface - private interface ConsumerWithException { - void accept(T t, U u) throws IOException; - } - - private static T toDelta(Function supplier, ConsumerWithException consumer, - final MutationProto proto, final CellScanner cellScanner) throws IOException { - byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null; - T mutation = row == null ? null : supplier.apply(new Bytes(row)); - int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; - if (cellCount > 0) { - // The proto has metadata only and the data is separate to be found in the cellScanner. 
- if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); - } - for (int i = 0; i < cellCount; i++) { - if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); - } - Cell cell = cellScanner.current(); - if (mutation == null) { - mutation = supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); - } - consumer.accept(mutation, cell); - } - } else { - if (mutation == null) { - throw new IllegalArgumentException("row cannot be null"); - } - for (ColumnValue column : proto.getColumnValueList()) { - byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv : column.getQualifierValueList()) { - byte[] qualifier = qv.getQualifier().toByteArray(); - if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); - } - byte[] value = qv.getValue().toByteArray(); - byte[] tags = null; - if (qv.hasTags()) { - tags = qv.getTags().toByteArray(); - } - consumer.accept(mutation, ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(mutation.getRow()).setFamily(family) - .setQualifier(qualifier).setTimestamp(qv.getTimestamp()) - .setType(KeyValue.Type.Put.getCode()).setValue(value) - .setTags(tags).setSequenceId(0) - .build()); - } - } - } - mutation.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute : proto.getAttributeList()) { - mutation.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); - } - return mutation; - } - - /** - * Convert a protocol buffer Mutate to an Append - * @param cellScanner - * @param proto the protocol buffer Mutate to convert - * @return the converted client Append - * @throws IOException - */ - public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - MutationType type = proto.getMutateType(); - assert type == MutationType.APPEND : type.name(); - Append append = toDelta((Bytes row) -> new Append(row.get(), row.getOffset(), row.getLength()), - Append::add, proto, cellScanner); - if (proto.hasTimeRange()) { - TimeRange timeRange = protoToTimeRange(proto.getTimeRange()); - append.setTimeRange(timeRange.getMin(), timeRange.getMax()); - } - return append; - } - - /** - * Convert a protocol buffer Mutate to an Increment - * - * @param proto the protocol buffer Mutate to convert - * @return the converted client Increment - * @throws IOException - */ - public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - MutationType type = proto.getMutateType(); - assert type == MutationType.INCREMENT : type.name(); - Increment increment = toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), - Increment::add, proto, cellScanner); - if (proto.hasTimeRange()) { - TimeRange timeRange = protoToTimeRange(proto.getTimeRange()); - increment.setTimeRange(timeRange.getMin(), timeRange.getMax()); - } - return increment; - } - - /** - * Convert a MutateRequest to Mutation - * - * @param proto the protocol buffer Mutate to convert - * @return the converted Mutation - * @throws IOException - */ - public static Mutation toMutation(final MutationProto proto) throws IOException { - MutationType type = proto.getMutateType(); - if (type == MutationType.APPEND) { - return toAppend(proto, null); - } - if (type == 
MutationType.DELETE) { - return toDelete(proto, null); - } - if (type == MutationType.PUT) { - return toPut(proto, null); - } - throw new IOException("Unknown mutation type " + type); - } - - /** - * Convert a protocol buffer Mutate to a Get. - * @param proto the protocol buffer Mutate to convert. - * @param cellScanner - * @return the converted client get. - * @throws IOException - */ - public static Get toGet(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - MutationType type = proto.getMutateType(); - assert type == MutationType.INCREMENT || type == MutationType.APPEND : type.name(); - byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null; - Get get = null; - int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; - if (cellCount > 0) { - // The proto has metadata only and the data is separate to be found in the cellScanner. - if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " - + TextFormat.shortDebugString(proto)); - } - for (int i = 0; i < cellCount; i++) { - if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i - + " no cell returned: " + TextFormat.shortDebugString(proto)); - } - Cell cell = cellScanner.current(); - if (get == null) { - get = new Get(Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); - } - get.addColumn( - Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()), - Bytes.copy(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())); - } - } else { - get = new Get(row); - for (ColumnValue column : proto.getColumnValueList()) { - byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv : column.getQualifierValueList()) { - byte[] qualifier = qv.getQualifier().toByteArray(); - if (!qv.hasValue()) { - throw new DoNotRetryIOException("Missing required field: qualifier value"); - } - get.addColumn(family, qualifier); - } - } - } - if (proto.hasTimeRange()) { - TimeRange timeRange = protoToTimeRange(proto.getTimeRange()); - get.setTimeRange(timeRange.getMin(), timeRange.getMax()); - } - for (NameBytesPair attribute : proto.getAttributeList()) { - get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); - } - return get; - } - - public static ClientProtos.Scan.ReadType toReadType(Scan.ReadType readType) { - switch (readType) { - case DEFAULT: - return ClientProtos.Scan.ReadType.DEFAULT; - case STREAM: - return ClientProtos.Scan.ReadType.STREAM; - case PREAD: - return ClientProtos.Scan.ReadType.PREAD; - default: - throw new IllegalArgumentException("Unknown ReadType: " + readType); - } - } - - public static Scan.ReadType toReadType(ClientProtos.Scan.ReadType readType) { - switch (readType) { - case DEFAULT: - return Scan.ReadType.DEFAULT; - case STREAM: - return Scan.ReadType.STREAM; - case PREAD: - return Scan.ReadType.PREAD; - default: - throw new IllegalArgumentException("Unknown ReadType: " + readType); - } - } - - /** - * Convert a client Scan to a protocol buffer Scan - * - * @param scan the client Scan to convert - * @return the converted protocol buffer Scan - * @throws IOException - */ - public static ClientProtos.Scan toScan( - final Scan scan) throws IOException { - ClientProtos.Scan.Builder scanBuilder = - ClientProtos.Scan.newBuilder(); - scanBuilder.setCacheBlocks(scan.getCacheBlocks()); - if (scan.getBatch() > 0) { - 
scanBuilder.setBatchSize(scan.getBatch()); - } - if (scan.getMaxResultSize() > 0) { - scanBuilder.setMaxResultSize(scan.getMaxResultSize()); - } - if (scan.isSmall()) { - scanBuilder.setSmall(scan.isSmall()); - } - if (scan.getAllowPartialResults()) { - scanBuilder.setAllowPartialResults(scan.getAllowPartialResults()); - } - Boolean loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue(); - if (loadColumnFamiliesOnDemand != null) { - scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand); - } - scanBuilder.setMaxVersions(scan.getMaxVersions()); - scan.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { - scanBuilder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(ByteStringer.wrap(cf)) - .setTimeRange(toTimeRange(timeRange)) - .build()); - }); - scanBuilder.setTimeRange(toTimeRange(scan.getTimeRange())); - Map attributes = scan.getAttributesMap(); - if (!attributes.isEmpty()) { - NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { - attributeBuilder.setName(attribute.getKey()); - attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue())); - scanBuilder.addAttribute(attributeBuilder.build()); - } - } - byte[] startRow = scan.getStartRow(); - if (startRow != null && startRow.length > 0) { - scanBuilder.setStartRow(ByteStringer.wrap(startRow)); - } - byte[] stopRow = scan.getStopRow(); - if (stopRow != null && stopRow.length > 0) { - scanBuilder.setStopRow(ByteStringer.wrap(stopRow)); - } - if (scan.hasFilter()) { - scanBuilder.setFilter(ProtobufUtil.toFilter(scan.getFilter())); - } - if (scan.hasFamilies()) { - Column.Builder columnBuilder = Column.newBuilder(); - for (Map.Entry> - family: scan.getFamilyMap().entrySet()) { - columnBuilder.setFamily(ByteStringer.wrap(family.getKey())); - NavigableSet qualifiers = family.getValue(); - columnBuilder.clearQualifier(); - if (qualifiers != null && qualifiers.size() > 0) { - for (byte [] qualifier: qualifiers) { - columnBuilder.addQualifier(ByteStringer.wrap(qualifier)); - } - } - scanBuilder.addColumn(columnBuilder.build()); - } - } - if (scan.getMaxResultsPerColumnFamily() >= 0) { - scanBuilder.setStoreLimit(scan.getMaxResultsPerColumnFamily()); - } - if (scan.getRowOffsetPerColumnFamily() > 0) { - scanBuilder.setStoreOffset(scan.getRowOffsetPerColumnFamily()); - } - if (scan.isReversed()) { - scanBuilder.setReversed(scan.isReversed()); - } - if (scan.getConsistency() == Consistency.TIMELINE) { - scanBuilder.setConsistency(toConsistency(scan.getConsistency())); - } - if (scan.getCaching() > 0) { - scanBuilder.setCaching(scan.getCaching()); - } - long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan); - if (mvccReadPoint > 0) { - scanBuilder.setMvccReadPoint(mvccReadPoint); - } - if (!scan.includeStartRow()) { - scanBuilder.setIncludeStartRow(false); - } - scanBuilder.setIncludeStopRow(scan.includeStopRow()); - if (scan.getReadType() != Scan.ReadType.DEFAULT) { - scanBuilder.setReadType(toReadType(scan.getReadType())); - } - return scanBuilder.build(); - } - - /** - * Convert a protocol buffer Scan to a client Scan - * - * @param proto the protocol buffer Scan to convert - * @return the converted client Scan - * @throws IOException - */ - public static Scan toScan( - final ClientProtos.Scan proto) throws IOException { - byte[] startRow = HConstants.EMPTY_START_ROW; - byte[] stopRow = HConstants.EMPTY_END_ROW; - boolean includeStartRow = true; - boolean includeStopRow = false; - if 
(proto.hasStartRow()) { - startRow = proto.getStartRow().toByteArray(); - } - if (proto.hasStopRow()) { - stopRow = proto.getStopRow().toByteArray(); - } - if (proto.hasIncludeStartRow()) { - includeStartRow = proto.getIncludeStartRow(); - } - if (proto.hasIncludeStopRow()) { - includeStopRow = proto.getIncludeStopRow(); - } - Scan scan = - new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow); - if (proto.hasCacheBlocks()) { - scan.setCacheBlocks(proto.getCacheBlocks()); - } - if (proto.hasMaxVersions()) { - scan.setMaxVersions(proto.getMaxVersions()); - } - if (proto.hasStoreLimit()) { - scan.setMaxResultsPerColumnFamily(proto.getStoreLimit()); - } - if (proto.hasStoreOffset()) { - scan.setRowOffsetPerColumnFamily(proto.getStoreOffset()); - } - if (proto.hasLoadColumnFamiliesOnDemand()) { - scan.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand()); - } - if (proto.getCfTimeRangeCount() > 0) { - for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { - TimeRange timeRange = protoToTimeRange(cftr.getTimeRange()); - scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); - } - } - if (proto.hasTimeRange()) { - TimeRange timeRange = protoToTimeRange(proto.getTimeRange()); - scan.setTimeRange(timeRange.getMin(), timeRange.getMax()); - } - if (proto.hasFilter()) { - FilterProtos.Filter filter = proto.getFilter(); - scan.setFilter(ProtobufUtil.toFilter(filter)); - } - if (proto.hasBatchSize()) { - scan.setBatch(proto.getBatchSize()); - } - if (proto.hasMaxResultSize()) { - scan.setMaxResultSize(proto.getMaxResultSize()); - } - if (proto.hasSmall()) { - scan.setSmall(proto.getSmall()); - } - if (proto.hasAllowPartialResults()) { - scan.setAllowPartialResults(proto.getAllowPartialResults()); - } - for (NameBytesPair attribute: proto.getAttributeList()) { - scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); - } - if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { - byte[] family = column.getFamily().toByteArray(); - if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { - scan.addColumn(family, qualifier.toByteArray()); - } - } else { - scan.addFamily(family); - } - } - } - if (proto.hasReversed()) { - scan.setReversed(proto.getReversed()); - } - if (proto.hasConsistency()) { - scan.setConsistency(toConsistency(proto.getConsistency())); - } - if (proto.hasCaching()) { - scan.setCaching(proto.getCaching()); - } - if (proto.hasMvccReadPoint()) { - PackagePrivateFieldAccessor.setMvccReadPoint(scan, proto.getMvccReadPoint()); - } - if (scan.isSmall()) { - scan.setReadType(Scan.ReadType.PREAD); - } else if (proto.hasReadType()) { - scan.setReadType(toReadType(proto.getReadType())); - } - return scan; - } - - /** - * Create a protocol buffer Get based on a client Get. 
- * - * @param get the client Get - * @return a protocol buffer Get - * @throws IOException - */ - public static ClientProtos.Get toGet( - final Get get) throws IOException { - ClientProtos.Get.Builder builder = - ClientProtos.Get.newBuilder(); - builder.setRow(ByteStringer.wrap(get.getRow())); - builder.setCacheBlocks(get.getCacheBlocks()); - builder.setMaxVersions(get.getMaxVersions()); - if (get.getFilter() != null) { - builder.setFilter(ProtobufUtil.toFilter(get.getFilter())); - } - get.getColumnFamilyTimeRange().forEach((cf, timeRange) -> - builder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(ByteStringer.wrap(cf)) - .setTimeRange(toTimeRange(timeRange)).build()) - ); - builder.setTimeRange(toTimeRange(get.getTimeRange())); - Map attributes = get.getAttributesMap(); - if (!attributes.isEmpty()) { - NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { - attributeBuilder.setName(attribute.getKey()); - attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue())); - builder.addAttribute(attributeBuilder.build()); - } - } - if (get.hasFamilies()) { - Column.Builder columnBuilder = Column.newBuilder(); - Map> families = get.getFamilyMap(); - for (Map.Entry> family: families.entrySet()) { - NavigableSet qualifiers = family.getValue(); - columnBuilder.setFamily(ByteStringer.wrap(family.getKey())); - columnBuilder.clearQualifier(); - if (qualifiers != null && qualifiers.size() > 0) { - for (byte[] qualifier: qualifiers) { - columnBuilder.addQualifier(ByteStringer.wrap(qualifier)); - } - } - builder.addColumn(columnBuilder.build()); - } - } - if (get.getMaxResultsPerColumnFamily() >= 0) { - builder.setStoreLimit(get.getMaxResultsPerColumnFamily()); - } - if (get.getRowOffsetPerColumnFamily() > 0) { - builder.setStoreOffset(get.getRowOffsetPerColumnFamily()); - } - if (get.isCheckExistenceOnly()){ - builder.setExistenceOnly(true); - } - if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) { - builder.setConsistency(toConsistency(get.getConsistency())); - } - - Boolean loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); - if (loadColumnFamiliesOnDemand != null) { - builder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand); - } - - return builder.build(); - } - - public static MutationProto toMutation(final MutationType type, final Mutation mutation) - throws IOException { - return toMutation(type, mutation, HConstants.NO_NONCE); - } - - /** - * Create a protocol buffer Mutate based on a client Mutation - * - * @param type - * @param mutation - * @return a protobuf'd Mutation - * @throws IOException - */ - public static MutationProto toMutation(final MutationType type, final Mutation mutation, - final long nonce) throws IOException { - return toMutation(type, mutation, MutationProto.newBuilder(), nonce); - } - - public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder) throws IOException { - return toMutation(type, mutation, builder, HConstants.NO_NONCE); - } - - public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder, long nonce) - throws IOException { - builder = getMutationBuilderAndSetCommonFields(type, mutation, builder); - if (nonce != HConstants.NO_NONCE) { - builder.setNonce(nonce); - } - if (type == MutationType.INCREMENT) { - builder.setTimeRange(toTimeRange(((Increment) mutation).getTimeRange())); 
- } - if (type == MutationType.APPEND) { - builder.setTimeRange(toTimeRange(((Append) mutation).getTimeRange())); - } - ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); - QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry<byte[], List<Cell>> family: mutation.getFamilyCellMap().entrySet()) { - columnBuilder.clear(); - columnBuilder.setFamily(ByteStringer.wrap(family.getKey())); - for (Cell cell: family.getValue()) { - valueBuilder.clear(); - valueBuilder.setQualifier(ByteStringer.wrap( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); - valueBuilder.setValue(ByteStringer.wrap( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); - valueBuilder.setTimestamp(cell.getTimestamp()); - if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) { - KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte()); - valueBuilder.setDeleteType(toDeleteType(keyValueType)); - } - columnBuilder.addQualifierValue(valueBuilder.build()); - } - builder.addColumnValue(columnBuilder.build()); - } - return builder.build(); - } - - /** - * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. - * @param type - * @param mutation - * @param builder - * @return a protobuf'd Mutation - * @throws IOException - */ - public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder) throws IOException { - return toMutationNoData(type, mutation, builder, HConstants.NO_NONCE); - } - - /** - * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. - * @param type - * @param mutation - * @return a protobuf'd Mutation - * @throws IOException - */ - public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation) - throws IOException { - MutationProto.Builder builder = MutationProto.newBuilder(); - return toMutationNoData(type, mutation, builder); - } - - public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder, long nonce) throws IOException { - getMutationBuilderAndSetCommonFields(type, mutation, builder); - builder.setAssociatedCellCount(mutation.size()); - if (mutation instanceof Increment) { - builder.setTimeRange(toTimeRange(((Increment)mutation).getTimeRange())); - } - if (mutation instanceof Append) { - builder.setTimeRange(toTimeRange(((Append)mutation).getTimeRange())); - } - if (nonce != HConstants.NO_NONCE) { - builder.setNonce(nonce); - } - return builder.build(); - } - - /** - * Code shared by {@link #toMutation(MutationType, Mutation)} and - * {@link #toMutationNoData(MutationType, Mutation)} - * @param type - * @param mutation - * @return A partly-filled out protobuf'd Mutation.
- */ - private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type, - final Mutation mutation, MutationProto.Builder builder) { - builder.setRow(ByteStringer.wrap(mutation.getRow())); - builder.setMutateType(type); - builder.setDurability(toDurability(mutation.getDurability())); - builder.setTimestamp(mutation.getTimestamp()); - Map<String, byte[]> attributes = mutation.getAttributesMap(); - if (!attributes.isEmpty()) { - NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry<String, byte[]> attribute: attributes.entrySet()) { - attributeBuilder.setName(attribute.getKey()); - attributeBuilder.setValue(ByteStringer.wrap(attribute.getValue())); - builder.addAttribute(attributeBuilder.build()); - } - } - return builder; - } - - /** - * Convert a client Result to a protocol buffer Result - * - * @param result the client Result to convert - * @return the converted protocol buffer Result - */ - public static ClientProtos.Result toResult(final Result result) { - if (result.getExists() != null) { - return toResult(result.getExists(), result.isStale()); - } - - Cell[] cells = result.rawCells(); - if (cells == null || cells.length == 0) { - return result.isStale() ? EMPTY_RESULT_PB_STALE : EMPTY_RESULT_PB; - } - - ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); - for (Cell c : cells) { - builder.addCell(toCell(c)); - } - - builder.setStale(result.isStale()); - builder.setPartial(result.mayHaveMoreCellsInRow()); - - return builder.build(); - } - - /** - * Convert a client Result to a protocol buffer Result - * - * @param existence the client existence to send - * @return the converted protocol buffer Result - */ - public static ClientProtos.Result toResult(final boolean existence, boolean stale) { - if (stale) { - return existence ? EMPTY_RESULT_PB_EXISTS_TRUE_STALE : EMPTY_RESULT_PB_EXISTS_FALSE_STALE; - } else { - return existence ? EMPTY_RESULT_PB_EXISTS_TRUE : EMPTY_RESULT_PB_EXISTS_FALSE; - } - } - - /** - * Convert a client Result to a protocol buffer Result. - * The pb Result does not include the Cell data. That is for transport otherwise. - * - * @param result the client Result to convert - * @return the converted protocol buffer Result - */ - public static ClientProtos.Result toResultNoData(final Result result) { - if (result.getExists() != null) return toResult(result.getExists(), result.isStale()); - int size = result.size(); - if (size == 0) return result.isStale() ? EMPTY_RESULT_PB_STALE : EMPTY_RESULT_PB; - ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); - builder.setAssociatedCellCount(size); - builder.setStale(result.isStale()); - return builder.build(); - } - - /** - * Convert a protocol buffer Result to a client Result - * - * @param proto the protocol buffer Result to convert - * @return the converted client Result - */ - public static Result toResult(final ClientProtos.Result proto) { - if (proto.hasExists()) { - if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; - } - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; - } - - List<CellProtos.Cell> values = proto.getCellList(); - if (values.isEmpty()) { - return proto.getStale() ?
EMPTY_RESULT_STALE : EMPTY_RESULT; - } - - List<Cell> cells = new ArrayList<>(values.size()); - ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (CellProtos.Cell c : values) { - cells.add(toCell(builder, c)); - } - return Result.create(cells, null, proto.getStale(), proto.getPartial()); - } - - /** - * Convert a protocol buffer Result to a client Result - * - * @param proto the protocol buffer Result to convert - * @param scanner Optional cell scanner. - * @return the converted client Result - * @throws IOException - */ - public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner) - throws IOException { - List<CellProtos.Cell> values = proto.getCellList(); - - if (proto.hasExists()) { - if (!values.isEmpty() || - (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) { - throw new IllegalArgumentException("bad proto: exists with cells is not allowed " + proto); - } - if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; - } - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; - } - - // TODO: Unit test that has some Cells in scanner and some in the proto. - List<Cell> cells = null; - if (proto.hasAssociatedCellCount()) { - int count = proto.getAssociatedCellCount(); - cells = new ArrayList<>(count + values.size()); - for (int i = 0; i < count; i++) { - if (!scanner.advance()) throw new IOException("Failed get " + i + " of " + count); - cells.add(scanner.current()); - } - } - - if (!values.isEmpty()) { - if (cells == null) cells = new ArrayList<>(values.size()); - ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (CellProtos.Cell c: values) { - cells.add(toCell(builder, c)); - } - } - - return (cells == null || cells.isEmpty()) - ? (proto.getStale() ?
EMPTY_RESULT_STALE : EMPTY_RESULT) - : Result.create(cells, null, proto.getStale()); - } - - - /** - * Convert a ByteArrayComparable to a protocol buffer Comparator - * - * @param comparator the ByteArrayComparable to convert - * @return the converted protocol buffer Comparator - */ - public static ComparatorProtos.Comparator toComparator(ByteArrayComparable comparator) { - ComparatorProtos.Comparator.Builder builder = ComparatorProtos.Comparator.newBuilder(); - builder.setName(comparator.getClass().getName()); - builder.setSerializedComparator(ByteStringer.wrap(comparator.toByteArray())); - return builder.build(); - } - - /** - * Convert a protocol buffer Comparator to a ByteArrayComparable - * - * @param proto the protocol buffer Comparator to convert - * @return the converted ByteArrayComparable - */ - @SuppressWarnings("unchecked") - public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) - throws IOException { - String type = proto.getName(); - String funcName = "parseFrom"; - byte [] value = proto.getSerializedComparator().toByteArray(); - try { - Class<?> c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); - Method parseFrom = c.getMethod(funcName, byte[].class); - if (parseFrom == null) { - throw new IOException("Unable to locate function: " + funcName + " in type: " + type); - } - return (ByteArrayComparable)parseFrom.invoke(null, value); - } catch (Exception e) { - throw new IOException(e); - } - } - - /** - * Convert a protocol buffer Filter to a client Filter - * - * @param proto the protocol buffer Filter to convert - * @return the converted Filter - */ - @SuppressWarnings("unchecked") - public static Filter toFilter(FilterProtos.Filter proto) throws IOException { - String type = proto.getName(); - final byte [] value = proto.getSerializedFilter().toByteArray(); - String funcName = "parseFrom"; - try { - Class<?> c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); - Method parseFrom = c.getMethod(funcName, byte[].class); - if (parseFrom == null) { - throw new IOException("Unable to locate function: " + funcName + " in type: " + type); - } - return (Filter)parseFrom.invoke(c, value); - } catch (Exception e) { - // Either we couldn't instantiate the method object, or "parseFrom" failed. - // In either case, let's not retry. - throw new DoNotRetryIOException(e); - } - } - - /** - * Convert a client Filter to a protocol buffer Filter - * - * @param filter the Filter to convert - * @return the converted protocol buffer Filter - */ - public static FilterProtos.Filter toFilter(Filter filter) throws IOException { - FilterProtos.Filter.Builder builder = FilterProtos.Filter.newBuilder(); - builder.setName(filter.getClass().getName()); - builder.setSerializedFilter(ByteStringer.wrap(filter.toByteArray())); - return builder.build(); - } - - /** - * Convert a delete KeyValue type to protocol buffer DeleteType. - * - * @param type - * @return protocol buffer DeleteType - * @throws IOException - */ - public static DeleteType toDeleteType( - KeyValue.Type type) throws IOException { - switch (type) { - case Delete: - return DeleteType.DELETE_ONE_VERSION; - case DeleteColumn: - return DeleteType.DELETE_MULTIPLE_VERSIONS; - case DeleteFamily: - return DeleteType.DELETE_FAMILY; - case DeleteFamilyVersion: - return DeleteType.DELETE_FAMILY_VERSION; - default: - throw new IOException("Unknown delete type: " + type); - } - } - - /** - * Convert a protocol buffer DeleteType to delete KeyValue type. - * - * @param type The DeleteType - * @return The type.
- * @throws IOException - */ - public static KeyValue.Type fromDeleteType( - DeleteType type) throws IOException { - switch (type) { - case DELETE_ONE_VERSION: - return KeyValue.Type.Delete; - case DELETE_MULTIPLE_VERSIONS: - return KeyValue.Type.DeleteColumn; - case DELETE_FAMILY: - return KeyValue.Type.DeleteFamily; - case DELETE_FAMILY_VERSION: - return KeyValue.Type.DeleteFamilyVersion; - default: - throw new IOException("Unknown delete type: " + type); - } - } - - /** - * Convert a stringified protocol buffer exception Parameter to a Java Exception - * - * @param parameter the protocol buffer Parameter to convert - * @return the converted Exception - * @throws IOException if failed to deserialize the parameter - */ - @SuppressWarnings("unchecked") - public static Throwable toException(final NameBytesPair parameter) throws IOException { - if (parameter == null || !parameter.hasValue()) return null; - String desc = parameter.getValue().toStringUtf8(); - String type = parameter.getName(); - try { - Class<? extends Throwable> c = - (Class<? extends Throwable>)Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); - Constructor<? extends Throwable> cn = null; - try { - cn = c.getDeclaredConstructor(String.class); - return cn.newInstance(desc); - } catch (NoSuchMethodException e) { - // Could be a raw RemoteException. See HBASE-8987. - cn = c.getDeclaredConstructor(String.class, String.class); - return cn.newInstance(type, desc); - } - } catch (Exception e) { - throw new IOException(e); - } - } - -// Start helpers for Client - - @SuppressWarnings("unchecked") - public static <T extends Service> T newServiceStub(Class<T> service, RpcChannel channel) - throws Exception { - return (T)Methods.call(service, null, "newStub", - new Class[]{ RpcChannel.class }, new Object[]{ channel }); - } - -// End helpers for Client -// Start helpers for Admin - - /** - * A helper to get the info of a region server using admin protocol. - * @return the server name - */ - public static ServerInfo getServerInfo(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { - GetServerInfoRequest request = buildGetServerInfoRequest(); - try { - GetServerInfoResponse response = admin.getServerInfo(controller, request); - return response.getServerInfo(); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - - - /** - * @see #buildGetServerInfoRequest() - */ - private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = - GetServerInfoRequest.newBuilder().build(); - - /** - * Create a new GetServerInfoRequest - * - * @return a GetServerInfoRequest - */ - public static GetServerInfoRequest buildGetServerInfoRequest() { - return GET_SERVER_INFO_REQUEST; - } - - public static ScanMetrics toScanMetrics(final byte[] bytes) { - Parser<MapReduceProtos.ScanMetrics> parser = MapReduceProtos.ScanMetrics.PARSER; - MapReduceProtos.ScanMetrics pScanMetrics = null; - try { - pScanMetrics = parser.parseFrom(bytes); - } catch (InvalidProtocolBufferException e) { - // Ignored; there are just no key values to add. - } - ScanMetrics scanMetrics = new ScanMetrics(); - if (pScanMetrics != null) { - for (HBaseProtos.NameInt64Pair pair : pScanMetrics.getMetricsList()) { - if (pair.hasName() && pair.hasValue()) { - scanMetrics.setCounter(pair.getName(), pair.getValue()); - } - } - } - return scanMetrics; - } - - /** - * Unwraps an exception from a protobuf service into the underlying (expected) IOException. This - * method will always throw an exception.
- * @param se the {@code ServiceException} instance to convert into an {@code IOException} - * @throws NullPointerException if {@code se} is {@code null} - */ - public static void toIOException(ServiceException se) throws IOException { - Objects.requireNonNull(se, "Service exception cannot be null"); - - Throwable cause = se.getCause(); - if (cause != null && cause instanceof IOException) { - throw (IOException)cause; - } - throw new IOException(se); - } - - public static CellProtos.Cell toCell(final Cell kv) { - // Doing this is going to kill us if we do it for all data passed. - // St.Ack 20121205 - CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder(); - kvbuilder.setRow(ByteStringer.wrap(kv.getRowArray(), kv.getRowOffset(), - kv.getRowLength())); - kvbuilder.setFamily(ByteStringer.wrap(kv.getFamilyArray(), - kv.getFamilyOffset(), kv.getFamilyLength())); - kvbuilder.setQualifier(ByteStringer.wrap(kv.getQualifierArray(), - kv.getQualifierOffset(), kv.getQualifierLength())); - kvbuilder.setCellType(CellProtos.CellType.valueOf(kv.getTypeByte())); - kvbuilder.setTimestamp(kv.getTimestamp()); - kvbuilder.setValue(ByteStringer.wrap(kv.getValueArray(), kv.getValueOffset(), - kv.getValueLength())); - return kvbuilder.build(); - } - - public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell) { - return cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) - .setValue(cell.getValue().toByteArray()) - .build(); - } - - /** - * Print out some subset of a MutationProto rather than all of it and its data - * @param proto Protobuf to print out - * @return Short String of mutation proto - */ - static String toShortString(final MutationProto proto) { - return "row=" + Bytes.toString(proto.getRow().toByteArray()) + - ", type=" + proto.getMutateType().toString(); - } - - public static TableName toTableName(HBaseProtos.TableName tableNamePB) { - return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(), - tableNamePB.getQualifier().asReadOnlyByteBuffer()); - } - - public static HBaseProtos.TableName toProtoTableName(TableName tableName) { - return HBaseProtos.TableName.newBuilder() - .setNamespace(ByteStringer.wrap(tableName.getNamespace())) - .setQualifier(ByteStringer.wrap(tableName.getQualifier())).build(); - } - - /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays - * @param builder current message builder - * @param b byte array - * @throws IOException - */ - public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException { - final CodedInputStream codedInput = CodedInputStream.newInstance(b); - codedInput.setSizeLimit(b.length); - builder.mergeFrom(codedInput); - codedInput.checkLastTagWas(0); - } - - /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays - * @param builder current message builder - * @param b byte array - * @param offset - * @param length - * @throws IOException - */ - public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length) - throws IOException { - final CodedInputStream codedInput = CodedInputStream.newInstance(b, offset, length); - codedInput.setSizeLimit(length); - builder.mergeFrom(codedInput); - codedInput.checkLastTagWas(0); - } 
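// A minimal sketch (not part of this patch; the serialized payload in `bytes` is
// assumed) of why the two mergeFrom helpers above exist: CodedInputStream applies
// a default 64MB size limit while decoding, so the helpers raise that limit to
// exactly the bytes on hand before merging, e.g.:
//
//   import com.google.protobuf.CodedInputStream;
//   import com.google.protobuf.Message;
//
//   static void parseLarge(Message.Builder builder, byte[] bytes) throws IOException {
//     CodedInputStream in = CodedInputStream.newInstance(bytes);
//     in.setSizeLimit(bytes.length);  // lift the default 64MB cap
//     builder.mergeFrom(in);
//     in.checkLastTagWas(0);          // confirm the full message was consumed
//   }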
- - private static TimeRange protoToTimeRange(HBaseProtos.TimeRange timeRange) throws IOException { - long minStamp = 0; - long maxStamp = Long.MAX_VALUE; - if (timeRange.hasFrom()) { - minStamp = timeRange.getFrom(); - } - if (timeRange.hasTo()) { - maxStamp = timeRange.getTo(); - } - return new TimeRange(minStamp, maxStamp); - } - - /** - * Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type} - * from {@link SnapshotType} - * @param type the SnapshotDescription type - * @return the protobuf SnapshotDescription type - */ - public static HBaseProtos.SnapshotDescription.Type - createProtosSnapShotDescType(SnapshotType type) { - return HBaseProtos.SnapshotDescription.Type.valueOf(type.name()); - } - - /** - * Convert a byte array to a protocol buffer RegionSpecifier - * - * @param type the region specifier type - * @param value the region specifier byte array value - * @return a protocol buffer RegionSpecifier - */ - public static RegionSpecifier buildRegionSpecifier( - final RegionSpecifierType type, final byte[] value) { - RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); - regionBuilder.setValue(ByteStringer.wrap(value)); - regionBuilder.setType(type); - return regionBuilder.build(); - } - - /** - * Get a ServerName from the passed in data bytes. - * @param data Data with a serialized server name in it; can handle the old style - * servername where servername was host and port. Works too with data that - * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that - * has a serialized {@link ServerName} in it. - * @return Returns null if data is null else converts passed data - * to a ServerName instance. - * @throws DeserializationException - */ - public static ServerName toServerName(final byte [] data) throws DeserializationException { - if (data == null || data.length <= 0) return null; - if (ProtobufMagic.isPBMagicPrefix(data)) { - int prefixLen = ProtobufMagic.lengthOfPBMagic(); - try { - ZooKeeperProtos.Master rss = - ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = - rss.getMaster(); - return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); - } catch (/*InvalidProtocolBufferException*/IOException e) { - // A failed parse of the znode is pretty catastrophic. Rather than loop - // retrying hoping the bad bytes will change, and rather than change - // the signature on this method to add an IOE which will send ripples all - // over the code base, throw a DeserializationException. This should "never" happen. - // Fail fast if it does. - throw new DeserializationException(e); - } - } - // The str returned could be old style -- pre hbase-1502 -- which was - // hostname and port separated by a colon rather than hostname, port and - // startcode delimited by a ','. - String str = Bytes.toString(data); - int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR); - if (index != -1) { - // Presume it's a ServerName serialized with versioned bytes. - return ServerName.parseVersionedServerName(data); - } - // Presume it is a hostname:port format.
- String hostname = Addressing.parseHostname(str); - int port = Addressing.parsePort(str); - return ServerName.valueOf(hostname, port, -1L); - } - - public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) { - if (timeRange == null) { - timeRange = TimeRange.allTime(); - } - return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()).setTo(timeRange.getMax()) - .build(); - } - - public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { - RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName()); - for (HBaseProtos.ServerName el : proto.getServersList()) { - RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort())); - } - for (HBaseProtos.TableName pTableName : proto.getTablesList()) { - RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName)); - } - return RSGroupInfo; - } - - public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { - List<HBaseProtos.TableName> tables = new ArrayList<>(pojo.getTables().size()); - for (TableName arg : pojo.getTables()) { - tables.add(ProtobufUtil.toProtoTableName(arg)); - } - List<HBaseProtos.ServerName> hostports = new ArrayList<>(pojo.getServers().size()); - for (Address el : pojo.getServers()) { - hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) - .setPort(el.getPort()).build()); - } - return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports) - .addAllTables(tables).build(); - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 557e67b47ad..41b4482efd8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2992,7 +2992,7 @@ public final class ProtobufUtil { /** * Creates {@link CompactionState} from - * {@link org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState} + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState} * state * @param state the protobuf CompactionState * @return CompactionState @@ -3011,7 +3011,8 @@ public final class ProtobufUtil { } /** - * Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type} + * Creates + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type} * from {@link SnapshotType} * @param type the SnapshotDescription type * @return the protobuf SnapshotDescription type @@ -3022,7 +3023,8 @@ public final class ProtobufUtil { } /** - * Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type} + * Creates + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type} * from the type of SnapshotDescription string * @param snapshotDesc string representing the snapshot description type * @return the protobuf SnapshotDescription type @@ -3033,8 +3035,8 @@ public final class ProtobufUtil { } /** - * Creates {@link SnapshotType} from the type of - * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} + * Creates {@link SnapshotType} from the + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type} * @param type the snapshot description type * @return the protobuf SnapshotDescription type */ @@ -3044,7 +3046,7 @@ public final class
ProtobufUtil { /** * Convert from {@link SnapshotDescription} to - * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription} * @param snapshotDesc the POJO SnapshotDescription * @return the protobuf SnapshotDescription */ @@ -3076,7 +3078,7 @@ public final class ProtobufUtil { /** * Convert from - * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} to + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription} to * {@link SnapshotDescription} * @param snapshotDesc the protobuf SnapshotDescription * @return the POJO SnapshotDescription diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java index d9fb983231b..f2f917e011a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java @@ -54,12 +54,12 @@ public final class ClientSnapshotDescriptionUtils { } /** - * Returns a single line (no \n) representation of snapshot metadata. Use this instead of - * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription#toString()}. + * Returns a single line (no \n) representation of snapshot metadata. Use this instead of the + * {@code toString} method of + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}. * We don't replace - * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}'s + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}'s * {@code toString}, because it is auto-generated by protoc. - * * @param snapshot description of the snapshot * @return single line string with a summary of the snapshot parameters */ diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index dc2f94071a5..1bd867363f6 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -107,10 +107,6 @@ test-jar test
- - org.apache.hbase - hbase-protocol - org.apache.hbase hbase-protocol-shaded diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index 52d79fb89ba..2f5024737db 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.mapreduce.ExportUtils; import org.apache.hadoop.hbase.mapreduce.Import; -import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos; import org.apache.hadoop.hbase.security.HBaseKerberosUtils; import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting; import org.apache.hadoop.hbase.security.User; @@ -82,6 +81,8 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos; + @Category({MediumTests.class}) public class TestSecureExport { @ClassRule diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index 99937de964f..e95d6a22fbb 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -91,10 +91,6 @@ org.apache.hbase hbase-common - - org.apache.hbase - hbase-protocol - org.apache.hbase hbase-client diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBCell.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBCell.java index b1ec97ea5af..9962797d3dd 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBCell.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBCell.java @@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.types; import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; import java.io.IOException; -import org.apache.hadoop.hbase.protobuf.generated.CellProtos; +import org.apache.hadoop.hbase.example.protobuf.generated.CellMessage; import org.apache.hadoop.hbase.util.PositionedByteRange; import org.apache.yetus.audience.InterfaceAudience; @@ -28,15 +28,15 @@ import org.apache.yetus.audience.InterfaceAudience; * An example for using protobuf objects with {@link DataType} API. 
*/ @InterfaceAudience.Private -public class PBCell extends PBType<CellProtos.Cell> { +public class PBCell extends PBType<CellMessage.Cell> { @Override - public Class<CellProtos.Cell> encodedClass() { - return CellProtos.Cell.class; + public Class<CellMessage.Cell> encodedClass() { + return CellMessage.Cell.class; } @Override public int skip(PositionedByteRange src) { - CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder(); + CellMessage.Cell.Builder builder = CellMessage.Cell.newBuilder(); CodedInputStream is = inputStreamFromByteRange(src); is.setSizeLimit(src.getLength()); try { @@ -50,12 +50,12 @@ public class PBCell extends PBType<CellProtos.Cell> { } @Override - public CellProtos.Cell decode(PositionedByteRange src) { - CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder(); + public CellMessage.Cell decode(PositionedByteRange src) { + CellMessage.Cell.Builder builder = CellMessage.Cell.newBuilder(); CodedInputStream is = inputStreamFromByteRange(src); is.setSizeLimit(src.getLength()); try { - CellProtos.Cell ret = builder.mergeFrom(is).build(); + CellMessage.Cell ret = builder.mergeFrom(is).build(); src.setPosition(src.getPosition() + is.getTotalBytesRead()); return ret; } catch (IOException e) { @@ -64,7 +64,7 @@ public class PBCell extends PBType<CellProtos.Cell> { } @Override - public int encode(PositionedByteRange dst, CellProtos.Cell val) { + public int encode(PositionedByteRange dst, CellMessage.Cell val) { CodedOutputStream os = outputStreamFromByteRange(dst); try { int before = os.spaceLeft(), after, written; diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBType.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBType.java index 15d56ff4054..8c6a1d9660f 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBType.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBType.java @@ -25,8 +25,7 @@ import org.apache.hadoop.hbase.util.PositionedByteRange; import org.apache.yetus.audience.InterfaceAudience; /** - * A base-class for {@link DataType} implementations backed by protobuf. See - * {@code PBKeyValue} in {@code hbase-examples} module. + * A base-class for {@link DataType} implementations backed by protobuf. See {@link PBCell}. */ @InterfaceAudience.Private public abstract class PBType<T extends Message> implements DataType<T> { @@ -58,7 +57,8 @@ public abstract class PBType<T extends Message> implements DataType<T> { /** * Create a {@link CodedInputStream} from a {@link PositionedByteRange}. Be sure to update * {@code src}'s position after consuming from the stream. - * <p> For example: + * <p> + * For example: * <pre>
    * Foo.Builder builder = ...
    * CodedInputStream is = inputStreamFromByteRange(src);
@@ -67,16 +67,15 @@ public abstract class PBType<T extends Message> implements DataType<T> {
    * </pre>
*/ public static CodedInputStream inputStreamFromByteRange(PositionedByteRange src) { - return CodedInputStream.newInstance( - src.getBytes(), - src.getOffset() + src.getPosition(), + return CodedInputStream.newInstance(src.getBytes(), src.getOffset() + src.getPosition(), src.getRemaining()); } /** * Create a {@link CodedOutputStream} from a {@link PositionedByteRange}. Be sure to update * {@code dst}'s position after writing to the stream. - * <p> For example: + * <p> + * For example: * <pre>
    * CodedOutputStream os = outputStreamFromByteRange(dst);
    * int before = os.spaceLeft(), after, written;
@@ -87,10 +86,7 @@ public abstract class PBType<T extends Message> implements DataType<T> {
    * </pre>
*/ public static CodedOutputStream outputStreamFromByteRange(PositionedByteRange dst) { - return CodedOutputStream.newInstance( - dst.getBytes(), - dst.getOffset() + dst.getPosition(), - dst.getRemaining() - ); + return CodedOutputStream.newInstance(dst.getBytes(), dst.getOffset() + dst.getPosition(), + dst.getRemaining()); } } diff --git a/hbase-protocol/src/main/protobuf/TestProcedure.proto b/hbase-examples/src/main/protobuf/CellMessage.proto similarity index 78% rename from hbase-protocol/src/main/protobuf/TestProcedure.proto rename to hbase-examples/src/main/protobuf/CellMessage.proto index 982ea674b40..fe85df1f062 100644 --- a/hbase-protocol/src/main/protobuf/TestProcedure.proto +++ b/hbase-examples/src/main/protobuf/CellMessage.proto @@ -16,11 +16,8 @@ * limitations under the License. */ syntax = "proto2"; +package org.apache.hadoop.hbase.example.protobuf.generated; -option java_package = "org.apache.hadoop.hbase.ipc.protobuf.generated"; -option java_outer_classname = "TestProcedureProtos"; -option java_generic_services = true; - -message TestTableDDLStateData { - required string table_name = 1; +message Cell { + optional bytes row = 1; } diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java index b5a880b4e3f..5ca3cf4e5cd 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java @@ -18,31 +18,23 @@ package org.apache.hadoop.hbase.types; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderType; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; +import com.google.protobuf.ByteString; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.CellProtos; +import org.apache.hadoop.hbase.example.protobuf.generated.CellMessage; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.PositionedByteRange; import org.apache.hadoop.hbase.util.SimplePositionedByteRange; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class, MiscTests.class}) +@Category({ SmallTests.class, MiscTests.class }) public class TestPBCell { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPBCell.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestPBCell.class); private static final PBCell CODEC = new PBCell(); @@ -51,16 +43,14 @@ public class TestPBCell { */ @Test public void testRoundTrip() { - final Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"), - Bytes.toBytes("qual"), Bytes.toBytes("val")); - CellProtos.Cell c = ProtobufUtil.toCell(cell), decoded; - PositionedByteRange pbr = new SimplePositionedByteRange(c.getSerializedSize()); + CellMessage.Cell cell = + CellMessage.Cell.newBuilder().setRow(ByteString.copyFromUtf8("row")).build(); + PositionedByteRange pbr = new SimplePositionedByteRange(cell.getSerializedSize()); pbr.setPosition(0); - int encodedLength = 
CODEC.encode(pbr, c); + int encodedLength = CODEC.encode(pbr, cell); pbr.setPosition(0); - decoded = CODEC.decode(pbr); + CellMessage.Cell decoded = CODEC.decode(pbr); assertEquals(encodedLength, pbr.getPosition()); - assertTrue(CellUtil.equals(cell, ProtobufUtil - .toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), decoded))); + assertEquals("row", decoded.getRow().toStringUtf8()); } } diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 6b173f0ffd6..460879d214b 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -159,10 +159,6 @@ hbase-common jar
- - org.apache.hbase - hbase-protocol - org.apache.hbase hbase-protocol-shaded diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 97053ef8a1b..9b519dd1fe0 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -105,18 +105,6 @@ test-jar test - - - org.apache.hbase - hbase-protocol - - - - com.google.protobuf - protobuf-java - org.apache.hbase hbase-protocol-shaded diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index e38ee80598d..64c0ad19142 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -805,7 +805,6 @@ public class TableMapReduceUtil { addDependencyJarsForClasses(conf, // explicitly pull a class from each module org.apache.hadoop.hbase.HConstants.class, // hbase-common - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class, // hbase-protocol org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, // hbase-protocol-shaded org.apache.hadoop.hbase.client.Put.class, // hbase-client org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index 9ee649bd58b..a78ba8196ca 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; @@ -75,6 +74,8 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; + @Category({MapReduceTests.class, LargeTests.class}) public class TestImportTSVWithVisibilityLabels implements Configurable { diff --git a/hbase-protocol/README.txt b/hbase-protocol/README.txt deleted file mode 100644 index a6354e831c8..00000000000 --- a/hbase-protocol/README.txt +++ /dev/null @@ -1,13 +0,0 @@ -ON PROTOBUFS -This maven module has core protobuf definition files ('.protos') used by hbase -Coprocessor Endpoints that ship with hbase core including tests. Coprocessor -Endpoints are meant to be standalone, independent code not reliant on hbase -internals. They define their Service using protobuf. The protobuf version -they use can be distinct from that used by HBase internally since HBase started -shading its protobuf references. Endpoints have no access to the shaded protobuf -hbase uses. They do have access to the content of hbase-protocol -- the -.protos found in here -- but avoid using as much of this as you can as it is -liable to change. 
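For illustration (a hedged sketch; MyService is a hypothetical Endpoint Service generated by unshaded protoc): because Endpoints see only plain com.google.protobuf, a client wires a stub through an unshaded RpcChannel, e.g. with the newServiceStub helper this patch deletes:

  static MyService stubFor(com.google.protobuf.RpcChannel channel) throws Exception {
    // reflectively calls MyService.newStub(channel) under the hood
    return ProtobufUtil.newServiceStub(MyService.class, channel);
  }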
- -Generation of java files from protobuf .proto files included here is done as -part of the build. diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml deleted file mode 100644 index 6f3a42a7f8b..00000000000 --- a/hbase-protocol/pom.xml +++ /dev/null @@ -1,216 +0,0 @@ - - - - 4.0.0 - - hbase-build-configuration - org.apache.hbase - 3.0.0-SNAPSHOT - ../hbase-build-configuration - - hbase-protocol - Apache HBase - Protocol - Protobuf protocol classes used by HBase to communicate. - - true - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - - - com.google.code.maven-replacer-plugin - replacer - 1.5.3 - - - process-sources - - replace - - - - - ${basedir}/target/generated-sources/ - - **/*.java - - - true - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - - - - com.google.protobuf - protobuf-java - - - org.slf4j - slf4j-api - - - - - - skipProtocolTests - - - skipProtocolTests - - - - true - true - - - - build-with-jdk11 - - [1.11,) - - - - javax.annotation - javax.annotation-api - - - - - eclipse-specific - - - m2e.version - - - - - - - - org.eclipse.m2e - lifecycle-mapping - 1.0.0 - - - - - - org.apache.hadoop - hadoop-maven-plugins - [2.0.5-alpha,) - - protoc - - - - - - - - - - com.google.code.maven-replacer-plugin - - replacer - [1.5.3,) - - replace - - - - - - - - - - - - - - - - diff --git a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java deleted file mode 100644 index de2124f2990..00000000000 --- a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.protobuf; // This is a lie. - -import org.apache.yetus.audience.InterfaceAudience; - -/** - * Helper class to extract byte arrays from {@link ByteString} without copy. - *
<p>
- * Without this protobufs would force us to copy every single byte array out - * of the objects de-serialized from the wire (which already do one copy, on - * top of the copies the JVM does to go from kernel buffer to C buffer and - * from C buffer to JVM buffer). - * - * @since 0.96.1 - */ -@InterfaceAudience.Private -public final class HBaseZeroCopyByteString extends LiteralByteString { - // Gotten from AsyncHBase code base with permission. - /** Private constructor so this class cannot be instantiated. */ - private HBaseZeroCopyByteString() { - super(null); - throw new UnsupportedOperationException("Should never be here."); - } - - /** - * Wraps a byte array in a {@link ByteString} without copying it. - * @param array array to be wrapped - * @return wrapped array - */ - public static ByteString wrap(final byte[] array) { - return new LiteralByteString(array); - } - - /** - * Wraps a subset of a byte array in a {@link ByteString} without copying it. - * @param array array to be wrapped - * @param offset from - * @param length length - * @return wrapped array - */ - public static ByteString wrap(final byte[] array, int offset, int length) { - return new BoundedByteString(array, offset, length); - } - - // TODO: - // ZeroCopyLiteralByteString.wrap(this.buf, 0, this.count); - - /** - * Extracts the byte array from the given {@link ByteString} without copy. - * @param buf A buffer from which to extract the array. This buffer must be - * actually an instance of a {@code LiteralByteString}. - * @return byte[] representation - */ - public static byte[] zeroCopyGetBytes(final ByteString buf) { - if (buf instanceof LiteralByteString) { - return ((LiteralByteString) buf).bytes; - } - throw new UnsupportedOperationException("Need a LiteralByteString, got a " - + buf.getClass().getName()); - } -} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java deleted file mode 100644 index 581741d835b..00000000000 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import com.google.protobuf.ByteString; -import com.google.protobuf.HBaseZeroCopyByteString; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Hack to workaround HBASE-10304 issue that keeps bubbling up when a mapreduce context. - */ -@InterfaceAudience.Private -public final class ByteStringer { - private static final Logger LOG = LoggerFactory.getLogger(ByteStringer.class); - - /** - * Flag set at class loading time. 
- */ - private static boolean USE_ZEROCOPYBYTESTRING = true; - - // Can I classload HBaseZeroCopyByteString without IllegalAccessError? - // If we can, use it passing ByteStrings to pb else use native ByteString though more costly - // because it makes a copy of the passed in array. - static { - try { - HBaseZeroCopyByteString.wrap(new byte [0]); - } catch (IllegalAccessError iae) { - USE_ZEROCOPYBYTESTRING = false; - LOG.debug("Failed to classload HBaseZeroCopyByteString: " + iae.toString()); - } - } - - private ByteStringer() { - super(); - } - - /** - * Wraps a byte array in a {@link ByteString} without copying it. - */ - public static ByteString wrap(final byte[] array) { - return USE_ZEROCOPYBYTESTRING? HBaseZeroCopyByteString.wrap(array): ByteString.copyFrom(array); - } - - /** - * Wraps a subset of a byte array in a {@link ByteString} without copying it. - */ - public static ByteString wrap(final byte[] array, int offset, int length) { - return USE_ZEROCOPYBYTESTRING? HBaseZeroCopyByteString.wrap(array, offset, length): - ByteString.copyFrom(array, offset, length); - } -} diff --git a/hbase-protocol/src/main/protobuf/AccessControl.proto b/hbase-protocol/src/main/protobuf/AccessControl.proto deleted file mode 100644 index c35bb5bfbae..00000000000 --- a/hbase-protocol/src/main/protobuf/AccessControl.proto +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "AccessControlProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; - -message Permission { - enum Action { - READ = 0; - WRITE = 1; - EXEC = 2; - CREATE = 3; - ADMIN = 4; - } - enum Type { - Global = 1; - Namespace = 2; - Table = 3; - } - required Type type = 1; - optional GlobalPermission global_permission = 2; - optional NamespacePermission namespace_permission = 3; - optional TablePermission table_permission = 4; -} - -message TablePermission { - optional TableName table_name = 1; - optional bytes family = 2; - optional bytes qualifier = 3; - repeated Permission.Action action = 4; -} - -message NamespacePermission { - optional bytes namespace_name = 1; - repeated Permission.Action action = 2; -} - -message GlobalPermission { - repeated Permission.Action action = 1; -} - -message UserPermission { - required bytes user = 1; - required Permission permission = 3; -} - -/** - * Content of the /hbase/acl/ znode. 
- */ -message UsersAndPermissions { - message UserPermissions { - required bytes user = 1; - repeated Permission permissions = 2; - } - - repeated UserPermissions user_permissions = 1; -} - -message GrantRequest { - required UserPermission user_permission = 1; - optional bool merge_existing_permissions = 2 [default = false]; -} - -message GrantResponse { -} - -message RevokeRequest { - required UserPermission user_permission = 1; -} - -message RevokeResponse { -} - -message GetUserPermissionsRequest { - optional Permission.Type type = 1; - optional TableName table_name = 2; - optional bytes namespace_name = 3; - optional bytes column_family = 4; - optional bytes column_qualifier = 5; - optional bytes user_name = 6; -} - -message GetUserPermissionsResponse { - repeated UserPermission user_permission = 1; -} - -message CheckPermissionsRequest { - repeated Permission permission = 1; -} - -message CheckPermissionsResponse { -} - -message HasPermissionRequest { - required TablePermission table_permission = 1; - required bytes user_name = 2; -} - -message HasPermissionResponse { - optional bool has_permission = 1; -} - -service AccessControlService { - rpc Grant(GrantRequest) - returns (GrantResponse); - - rpc Revoke(RevokeRequest) - returns (RevokeResponse); - - rpc GetUserPermissions(GetUserPermissionsRequest) - returns (GetUserPermissionsResponse); - - rpc CheckPermissions(CheckPermissionsRequest) - returns (CheckPermissionsResponse); - - rpc HasPermission(HasPermissionRequest) - returns (HasPermissionResponse); -} diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto deleted file mode 100644 index 68194d63b1b..00000000000 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ /dev/null @@ -1,310 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers that are used for Admin service. -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "AdminProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; -import "WAL.proto"; - -message GetRegionInfoRequest { - required RegionSpecifier region = 1; - optional bool compaction_state = 2; -} - -message GetRegionInfoResponse { - required RegionInfo region_info = 1; - optional CompactionState compaction_state = 2; - // optional bool DEPRECATED_isRecovering = 3; - - enum CompactionState { - NONE = 0; - MINOR = 1; - MAJOR = 2; - MAJOR_AND_MINOR = 3; - } -} - -/** - * Get a list of store files for a set of column families in a particular region. - * If no column family is specified, get the store files for all column families. 
- */ -message GetStoreFileRequest { - required RegionSpecifier region = 1; - repeated bytes family = 2; -} - -message GetStoreFileResponse { - repeated string store_file = 1; -} - -message GetOnlineRegionRequest { -} - -message GetOnlineRegionResponse { - repeated RegionInfo region_info = 1; -} - -message OpenRegionRequest { - repeated RegionOpenInfo open_info = 1; - // the intended server for this RPC. - optional uint64 serverStartCode = 2; - // wall clock time from master - optional uint64 master_system_time = 5; - - message RegionOpenInfo { - required RegionInfo region = 1; - optional uint32 version_of_offline_node = 2; - repeated ServerName favored_nodes = 3; - // open region for distributedLogReplay - // optional bool DEPRECATED_openForDistributedLogReplay = 4; - } -} - -message OpenRegionResponse { - repeated RegionOpeningState opening_state = 1; - - enum RegionOpeningState { - OPENED = 0; - ALREADY_OPENED = 1; - FAILED_OPENING = 2; - } -} - -message WarmupRegionRequest { - - required RegionInfo regionInfo = 1; -} - -message WarmupRegionResponse { -} - -/** - * Closes the specified region and will use or not use ZK during the close - * according to the specified flag. - */ -message CloseRegionRequest { - required RegionSpecifier region = 1; - optional uint32 version_of_closing_node = 2; - optional bool transition_in_ZK = 3 [default = true]; - optional ServerName destination_server = 4; - // the intended server for this RPC. - optional uint64 serverStartCode = 5; -} - -message CloseRegionResponse { - required bool closed = 1; -} - -/** - * Flushes the MemStore of the specified region. - *
<p>
- * This method is synchronous. - */ -message FlushRegionRequest { - required RegionSpecifier region = 1; - optional uint64 if_older_than_ts = 2; - optional bool write_flush_wal_marker = 3; // whether to write a marker to WAL even if not flushed -} - -message FlushRegionResponse { - required uint64 last_flush_time = 1; - optional bool flushed = 2; - optional bool wrote_flush_wal_marker = 3; -} - -/** - * Splits the specified region. - *
<p>
- * This method currently flushes the region and then forces a compaction which - * will then trigger a split. The flush is done synchronously but the - * compaction is asynchronous. - */ -message SplitRegionRequest { - required RegionSpecifier region = 1; - optional bytes split_point = 2; -} - -message SplitRegionResponse { -} - -/** - * Compacts the specified region. Performs a major compaction if specified. - *
<p>
- * This method is asynchronous. - */ -message CompactRegionRequest { - required RegionSpecifier region = 1; - optional bool major = 2; - optional bytes family = 3; -} - -message CompactRegionResponse { -} - -message UpdateFavoredNodesRequest { - repeated RegionUpdateInfo update_info = 1; - - message RegionUpdateInfo { - required RegionInfo region = 1; - repeated ServerName favored_nodes = 2; - } -} - -message UpdateFavoredNodesResponse { - optional uint32 response = 1; -} - -/** - * Merges the specified regions. - *
<p>
- * This method currently closes the regions and then merges them - */ -message MergeRegionsRequest { - required RegionSpecifier region_a = 1; - required RegionSpecifier region_b = 2; - optional bool forcible = 3 [default = false]; - // wall clock time from master - optional uint64 master_system_time = 4; -} - -message MergeRegionsResponse { -} - -// Protocol buffer version of WAL for replication -message WALEntry { - required WALKey key = 1; - // Following may be null if the KVs/Cells are carried along the side in a cellblock (See - // RPC for more on cellblocks). If Cells/KVs are in a cellblock, this next field is null - // and associated_cell_count has count of Cells associated w/ this WALEntry - repeated bytes key_value_bytes = 2; - // If Cell data is carried alongside in a cellblock, this is count of Cells in the cellblock. - optional int32 associated_cell_count = 3; -} - -/** - * Replicates the given entries. The guarantee is that the given entries - * will be durable on the slave cluster if this method returns without - * any exception. - */ -message ReplicateWALEntryRequest { - repeated WALEntry entry = 1; - optional string replicationClusterId = 2; - optional string sourceBaseNamespaceDirPath = 3; - optional string sourceHFileArchiveDirPath = 4; -} - -message ReplicateWALEntryResponse { -} - -message RollWALWriterRequest { -} - -/* - * Roll request responses no longer include regions to flush - * this list will always be empty when talking to a 1.0 server - */ -message RollWALWriterResponse { - // A list of encoded name of regions to flush - repeated bytes region_to_flush = 1; -} - -message StopServerRequest { - required string reason = 1; -} - -message StopServerResponse { -} - -message GetServerInfoRequest { -} - -message ServerInfo { - required ServerName server_name = 1; - optional uint32 webui_port = 2; -} - -message GetServerInfoResponse { - required ServerInfo server_info = 1; -} - -message UpdateConfigurationRequest { -} - -message UpdateConfigurationResponse { -} - -service AdminService { - rpc GetRegionInfo(GetRegionInfoRequest) - returns(GetRegionInfoResponse); - - rpc GetStoreFile(GetStoreFileRequest) - returns(GetStoreFileResponse); - - rpc GetOnlineRegion(GetOnlineRegionRequest) - returns(GetOnlineRegionResponse); - - rpc OpenRegion(OpenRegionRequest) - returns(OpenRegionResponse); - - rpc WarmupRegion(WarmupRegionRequest) - returns(WarmupRegionResponse); - - rpc CloseRegion(CloseRegionRequest) - returns(CloseRegionResponse); - - rpc FlushRegion(FlushRegionRequest) - returns(FlushRegionResponse); - - rpc SplitRegion(SplitRegionRequest) - returns(SplitRegionResponse); - - rpc CompactRegion(CompactRegionRequest) - returns(CompactRegionResponse); - - rpc MergeRegions(MergeRegionsRequest) - returns(MergeRegionsResponse); - - rpc ReplicateWALEntry(ReplicateWALEntryRequest) - returns(ReplicateWALEntryResponse); - - rpc Replay(ReplicateWALEntryRequest) - returns(ReplicateWALEntryResponse); - - rpc RollWALWriter(RollWALWriterRequest) - returns(RollWALWriterResponse); - - rpc GetServerInfo(GetServerInfoRequest) - returns(GetServerInfoResponse); - - rpc StopServer(StopServerRequest) - returns(StopServerResponse); - - rpc UpdateFavoredNodes(UpdateFavoredNodesRequest) - returns(UpdateFavoredNodesResponse); - - rpc UpdateConfiguration(UpdateConfigurationRequest) - returns(UpdateConfigurationResponse); -} diff --git a/hbase-protocol/src/main/protobuf/Authentication.proto b/hbase-protocol/src/main/protobuf/Authentication.proto deleted file mode 100644 index aa0211f7b48..00000000000 
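The FlushRegion, SplitRegion, CompactRegion and MergeRegions RPCs in the AdminService above are internal; applications trigger the same work through the Admin interface. A hedged sketch against the public Admin API, with a placeholder table name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegionMaintenanceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("t1");
      admin.flush(tn);        // FlushRegionRequest per region; synchronous
      admin.majorCompact(tn); // CompactRegionRequest with major = true; asynchronous
      admin.split(tn);        // SplitRegionRequest; flush is sync, compaction async
    }
  }
}

As the removed comments note, the split path flushes synchronously but relies on an asynchronous compaction to actually trigger the split.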
--- a/hbase-protocol/src/main/protobuf/Authentication.proto +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "AuthenticationProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -message AuthenticationKey { - required int32 id = 1; - required int64 expiration_date = 2; - required bytes key = 3; -} - - -message TokenIdentifier { - enum Kind { - HBASE_AUTH_TOKEN = 0; - } - required Kind kind = 1; - required bytes username = 2; - required int32 key_id = 3; - optional int64 issue_date = 4; - optional int64 expiration_date = 5; - optional int64 sequence_number = 6; -} - - -// Serialization of the org.apache.hadoop.security.token.Token class -// Note that this is a Hadoop class, so fields may change! -message Token { - // the TokenIdentifier in serialized form - // Note: we can't use the protobuf directly because the Hadoop Token class - // only stores the serialized bytes - optional bytes identifier = 1; - optional bytes password = 2; - optional bytes service = 3; -} - - -// RPC request & response messages -message GetAuthenticationTokenRequest { -} - -message GetAuthenticationTokenResponse { - optional Token token = 1; -} - -message WhoAmIRequest { -} - -message WhoAmIResponse { - optional string username = 1; - optional string auth_method = 2; -} - - -// RPC service -service AuthenticationService { - rpc GetAuthenticationToken(GetAuthenticationTokenRequest) - returns (GetAuthenticationTokenResponse); - - rpc WhoAmI(WhoAmIRequest) - returns (WhoAmIResponse); -} diff --git a/hbase-protocol/src/main/protobuf/Cell.proto b/hbase-protocol/src/main/protobuf/Cell.proto deleted file mode 100644 index f9628081897..00000000000 --- a/hbase-protocol/src/main/protobuf/Cell.proto +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
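The AuthenticationService messages above back HBase delegation tokens. A sketch of obtaining one with TokenUtil; TokenUtil.obtainToken(Connection) is deprecated in recent releases, so treat the exact entry point as an assumption:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.hbase.security.token.TokenUtil;
import org.apache.hadoop.security.token.Token;

public class TokenSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Issues a GetAuthenticationTokenRequest; the returned Token mirrors the
      // Token message above (identifier, password, service).
      Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(conn);
      System.out.println("token service: " + token.getService());
    }
  }
}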
- */ -syntax = "proto2"; - -// Cell and KeyValue protos -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "CellProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -/** - * The type of the key in a Cell - */ -enum CellType { - MINIMUM = 0; - PUT = 4; - - DELETE = 8; - DELETE_FAMILY_VERSION = 10; - DELETE_COLUMN = 12; - DELETE_FAMILY = 14; - - // MAXIMUM is used when searching; you look from maximum on down. - MAXIMUM = 255; -} - -/** - * Protocol buffer version of Cell. - */ -message Cell { - optional bytes row = 1; - optional bytes family = 2; - optional bytes qualifier = 3; - optional uint64 timestamp = 4; - optional CellType cell_type = 5; - optional bytes value = 6; - optional bytes tags = 7; -} - -/** - * Protocol buffer version of KeyValue. - * It doesn't have those transient parameters - */ -message KeyValue { - required bytes row = 1; - required bytes family = 2; - required bytes qualifier = 3; - optional uint64 timestamp = 4; - optional CellType key_type = 5; - optional bytes value = 6; - optional bytes tags = 7; -} diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto deleted file mode 100644 index a8454f922d2..00000000000 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ /dev/null @@ -1,550 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers that are used for Client service. -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "ClientProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; -import "Filter.proto"; -import "Cell.proto"; -import "Comparator.proto"; -import "MapReduce.proto"; - -/** - * The protocol buffer version of Authorizations. - */ -message Authorizations { - repeated string label = 1; -} - -/** - * The protocol buffer version of CellVisibility. - */ -message CellVisibility { - required string expression = 1; -} - -/** - * Container for a list of column qualifier names of a family. - */ -message Column { - required bytes family = 1; - repeated bytes qualifier = 2; -} - -/** - * Consistency defines the expected consistency level for an operation. - */ -enum Consistency { - STRONG = 0; - TIMELINE = 1; -} - -/** - * The protocol buffer version of Get. - * Unless existence_only is specified, return all the requested data - * for the row that matches exactly. 
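The Authorizations and CellVisibility messages above carry visibility labels on the wire. A hedged sketch of the corresponding client calls; the table, family, qualifier and labels are placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class VisibilitySketch {
  static Result labelledRoundTrip(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("t1"))) {
      Put put = new Put(Bytes.toBytes("r1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      put.setCellVisibility(new CellVisibility("(admin|audit)")); // CellVisibility.expression
      table.put(put);

      Get get = new Get(Bytes.toBytes("r1"));
      get.setAuthorizations(new Authorizations("admin")); // Authorizations.label
      return table.get(get);
    }
  }
}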
- */ -message Get { - required bytes row = 1; - repeated Column column = 2; - repeated NameBytesPair attribute = 3; - optional Filter filter = 4; - optional TimeRange time_range = 5; - optional uint32 max_versions = 6 [default = 1]; - optional bool cache_blocks = 7 [default = true]; - optional uint32 store_limit = 8; - optional uint32 store_offset = 9; - - // The result isn't asked for, just check for - // the existence. - optional bool existence_only = 10 [default = false]; - - // If the row to get doesn't exist, return the - // closest row before. Deprecated. No longer used! - // Since hbase-2.0.0. - optional bool closest_row_before = 11 [default = false]; - - optional Consistency consistency = 12 [default = STRONG]; - repeated ColumnFamilyTimeRange cf_time_range = 13; - optional bool load_column_families_on_demand = 14; /* DO NOT add defaults to load_column_families_on_demand. */ -} - -message Result { - // Result includes the Cells or else it just has a count of Cells - // that are carried otherwise. - repeated Cell cell = 1; - // The below count is set when the associated cells are - // not part of this protobuf message; they are passed alongside - // and then this Message is just a placeholder with metadata. - // The count is needed to know how many to peel off the block of Cells as - // ours. NOTE: This is different from the pb managed cell_count of the - // 'cell' field above which is non-null when the cells are pb'd. - optional int32 associated_cell_count = 2; - - // used for Get to check existence only. Not set if existence_only was not set to true - // in the query. - optional bool exists = 3; - - // Whether or not the results are coming from possibly stale data - optional bool stale = 4 [default = false]; - - // Whether or not the entire result could be returned. Results will be split when - // the RPC chunk size limit is reached. Partial results contain only a subset of the - // cells for a row and must be combined with a result containing the remaining cells - // to form a complete result. The equivalent flag in o.a.h.h.client.Result is - // mayHaveMoreCellsInRow. - optional bool partial = 5 [default = false]; -} - -/** - * The get request. Perform a single Get operation. - */ -message GetRequest { - required RegionSpecifier region = 1; - required Get get = 2; -} - -message GetResponse { - optional Result result = 1; -} - -/** - * Condition to check if the value of a given cell (row, - * family, qualifier) matches a value via a given comparator. - * - * Condition is used in check and mutate operations. - */ -message Condition { - required bytes row = 1; - optional bytes family = 2; - optional bytes qualifier = 3; - optional CompareType compare_type = 4; - optional Comparator comparator = 5; - optional TimeRange time_range = 6; - optional Filter filter = 7; -} - - -/** - * A specific mutation inside a mutate request. - * It can be an append, increment, put or delete based - * on the mutation type. It can be fully filled in or - * only metadata present because data is being carried - * elsewhere outside of pb. 
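A sketch of how the Get, Consistency and Result definitions above surface in the client API: existence_only becomes setCheckExistenceOnly, and the stale flag is readable on the Result. Row and consistency choices here are illustrative:

import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetSketch {
  static boolean rowExists(Table table) throws Exception {
    Get get = new Get(Bytes.toBytes("r1"));
    get.setConsistency(Consistency.TIMELINE); // Consistency field; may read replicas
    get.setCheckExistenceOnly(true);          // existence_only = true
    Result result = table.get(get);
    // GetResponse then carries exists instead of cells; isStale mirrors Result.stale.
    return result.getExists() != null && result.getExists() && !result.isStale();
  }
}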
- */ -message MutationProto { - optional bytes row = 1; - optional MutationType mutate_type = 2; - repeated ColumnValue column_value = 3; - optional uint64 timestamp = 4; - repeated NameBytesPair attribute = 5; - optional Durability durability = 6 [default = USE_DEFAULT]; - - // For some mutations, a result may be returned, in which case, - // time range can be specified for potential performance gain - optional TimeRange time_range = 7; - // The below count is set when the associated cells are NOT - // part of this protobuf message; they are passed alongside - // and then this Message is a placeholder with metadata. The - // count is needed to know how many to peel off the block of Cells as - // ours. NOTE: This is different from the pb managed cell_count of the - // 'cell' field above which is non-null when the cells are pb'd. - optional int32 associated_cell_count = 8; - - optional uint64 nonce = 9; - - enum Durability { - USE_DEFAULT = 0; - SKIP_WAL = 1; - ASYNC_WAL = 2; - SYNC_WAL = 3; - FSYNC_WAL = 4; - } - - enum MutationType { - APPEND = 0; - INCREMENT = 1; - PUT = 2; - DELETE = 3; - } - - enum DeleteType { - DELETE_ONE_VERSION = 0; - DELETE_MULTIPLE_VERSIONS = 1; - DELETE_FAMILY = 2; - DELETE_FAMILY_VERSION = 3; - } - - message ColumnValue { - required bytes family = 1; - repeated QualifierValue qualifier_value = 2; - - message QualifierValue { - optional bytes qualifier = 1; - optional bytes value = 2; - optional uint64 timestamp = 3; - optional DeleteType delete_type = 4; - optional bytes tags = 5; - } - } -} - -/** - * The mutate request. Perform a single Mutate operation. - * - * Optionally, you can specify a condition. The mutate - * will take place only if the condition is met. Otherwise, - * the mutate will be ignored. In the response result, - * parameter processed is used to indicate if the mutate - * actually happened. - */ -message MutateRequest { - required RegionSpecifier region = 1; - required MutationProto mutation = 2; - optional Condition condition = 3; - optional uint64 nonce_group = 4; -} - -message MutateResponse { - optional Result result = 1; - - // used for mutate to indicate processed only - optional bool processed = 2; -} - -/** - * Instead of get from a table, you can scan it with optional filters. - * You can specify the row key range, time range, the columns/families - * to scan and so on. - * - * This scan is used the first time in a scan request. The response of - * the initial scan will return a scanner id, which should be used to - * fetch result batches later on before it is closed. - */ -message Scan { - repeated Column column = 1; - repeated NameBytesPair attribute = 2; - optional bytes start_row = 3; - optional bytes stop_row = 4; - optional Filter filter = 5; - optional TimeRange time_range = 6; - optional uint32 max_versions = 7 [default = 1]; - optional bool cache_blocks = 8 [default = true]; - optional uint32 batch_size = 9; - optional uint64 max_result_size = 10; - optional uint32 store_limit = 11; - optional uint32 store_offset = 12; - optional bool load_column_families_on_demand = 13; /* DO NOT add defaults to load_column_families_on_demand. 
*/ - optional bool small = 14 [deprecated = true]; - optional bool reversed = 15 [default = false]; - optional Consistency consistency = 16 [default = STRONG]; - optional uint32 caching = 17; - optional bool allow_partial_results = 18; - repeated ColumnFamilyTimeRange cf_time_range = 19; - optional uint64 mvcc_read_point = 20 [default = 0]; - optional bool include_start_row = 21 [default = true]; - optional bool include_stop_row = 22 [default = false]; - enum ReadType { - DEFAULT = 0; - STREAM = 1; - PREAD = 2; - } - optional ReadType readType = 23 [default = DEFAULT]; - optional bool need_cursor_result = 24 [default = false]; -} - -/** - * A scan request. Initially, it should specify a scan. Later on, you - * can use the scanner id returned to fetch result batches with a different - * scan request. - * - * The scanner will remain open if there are more results, and it's not - * asked to be closed explicitly. - * - * You can fetch the results and ask the scanner to be closed to save - * a trip if you are not interested in remaining results. - */ -message ScanRequest { - optional RegionSpecifier region = 1; - optional Scan scan = 2; - optional uint64 scanner_id = 3; - optional uint32 number_of_rows = 4; - optional bool close_scanner = 5; - optional uint64 next_call_seq = 6; - optional bool client_handles_partials = 7; - optional bool client_handles_heartbeats = 8; - optional bool track_scan_metrics = 9; - optional bool renew = 10 [default = false]; - // if we have returned limit_of_rows rows to client, then close the scanner. - optional uint32 limit_of_rows = 11 [default = 0]; -} - -/** -* Scan cursor to tell client where we are scanning. -* - */ -message Cursor { - optional bytes row = 1; -} - -/** - * The scan response. If there are no more results, more_results will - * be false. If it is not specified, it means there are more. - */ -message ScanResponse { - // This field is filled in if we are doing cellblocks. A cellblock is made up - // of all Cells serialized out as one cellblock BUT responses from a server - // have their Cells grouped by Result. So we can reconstitute the - // Results on the client-side, this field is a list of counts of Cells - // in each Result that makes up the response. For example, if this field - // has 3, 3, 3 in it, then we know that on the client, we are to make - // three Results each of three Cells each. - repeated uint32 cells_per_result = 1; - - optional uint64 scanner_id = 2; - optional bool more_results = 3; - optional uint32 ttl = 4; - // If cells are not carried in an accompanying cellblock, then they are pb'd here. - // This field is mutually exclusive with cells_per_result (since the Cells will - // be inside the pb'd Result) - repeated Result results = 5; - optional bool stale = 6; - - // This field is filled in if we are doing cellblocks. In the event that a row - // could not fit all of its cells into a single RPC chunk, the results will be - // returned as partials, and reconstructed into a complete result on the client - // side. This field is a list of flags indicating whether or not the result - // that the cells belong to is a partial result. For example, if this field - // has false, false, true in it, then we know that on the client side, we need to - // make another RPC request since the last result was only a partial. - repeated bool partial_flag_per_result = 7; - - // A server may choose to limit the number of results returned to the client for - // reasons such as the size in bytes or quantity of results accumulated. 
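A hedged sketch tying the Scan, ScanRequest and Cursor definitions above to the client API: setLimit maps to limit_of_rows, and setNeedCursorResult asks the server to send Cursor heartbeats. Row keys are placeholders:

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSketch {
  static void scanWithCursor(Table table) throws Exception {
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("r000"))  // include_start_row defaults to true
        .withStopRow(Bytes.toBytes("r999"))   // include_stop_row defaults to false
        .setLimit(100)                        // limit_of_rows in ScanRequest
        .setNeedCursorResult(true);           // need_cursor_result = true
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        if (result.isCursor()) {
          // A heartbeat carrying Cursor.row rather than data.
          System.out.println("at " + Bytes.toString(result.getCursor().getRow()));
          continue;
        }
        // a normal Result assembled from cells_per_result / partial_flag_per_result
      }
    }
  }
}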
This field - // will true when more results exist in the current region. - optional bool more_results_in_region = 8; - - // This field is filled in if the server is sending back a heartbeat message. - // Heartbeat messages are sent back to the client to prevent the scanner from - // timing out. Seeing a heartbeat message communicates to the Client that the - // server would have continued to scan had the time limit not been reached. - optional bool heartbeat_message = 9; - - // This field is filled in if the client has requested that scan metrics be tracked. - // The metrics tracked here are sent back to the client to be tracked together with - // the existing client side metrics. - optional ScanMetrics scan_metrics = 10; - - // The mvcc read point which is used to open the scanner at server side. Client can - // make use of this mvcc_read_point when restarting a scanner to get a consistent view - // of a row. - optional uint64 mvcc_read_point = 11 [default = 0]; - - // If the Scan need cursor, return the row key we are scanning in heartbeat message. - // If the Scan doesn't need a cursor, don't set this field to reduce network IO. - optional Cursor cursor = 12; -} - -/** - * Atomically bulk load multiple HFiles (say from different column families) - * into an open region. - */ -message BulkLoadHFileRequest { - required RegionSpecifier region = 1; - repeated FamilyPath family_path = 2; - optional bool assign_seq_num = 3; - optional DelegationToken fs_token = 4; - optional string bulk_token = 5; - optional bool copy_file = 6 [default = false]; - - message FamilyPath { - required bytes family = 1; - required string path = 2; - } -} - -message BulkLoadHFileResponse { - required bool loaded = 1; -} - -message DelegationToken { - optional bytes identifier = 1; - optional bytes password = 2; - optional string kind = 3; - optional string service = 4; -} - -message PrepareBulkLoadRequest { - required TableName table_name = 1; - optional RegionSpecifier region = 2; -} - -message PrepareBulkLoadResponse { - required string bulk_token = 1; -} - -message CleanupBulkLoadRequest { - required string bulk_token = 1; - optional RegionSpecifier region = 2; -} - -message CleanupBulkLoadResponse { -} - -message CoprocessorServiceCall { - required bytes row = 1; - required string service_name = 2; - required string method_name = 3; - required bytes request = 4; -} - -message CoprocessorServiceResult { - optional NameBytesPair value = 1; -} - -message CoprocessorServiceRequest { - required RegionSpecifier region = 1; - required CoprocessorServiceCall call = 2; -} - -message CoprocessorServiceResponse { - required RegionSpecifier region = 1; - required NameBytesPair value = 2; -} - -// Either a Get or a Mutation -message Action { - // If part of a multi action, useful aligning - // result with what was originally submitted. - optional uint32 index = 1; - optional MutationProto mutation = 2; - optional Get get = 3; - optional CoprocessorServiceCall service_call = 4; -} - -/** - * Actions to run against a Region. - */ -message RegionAction { - required RegionSpecifier region = 1; - // When set, run mutations as atomic unit. - optional bool atomic = 2; - repeated Action action = 3; -} - -/* -* Statistics about the current load on the region -*/ -message RegionLoadStats { - // Percent load on the memstore. Guaranteed to be positive, between 0 and 100. - optional int32 memStoreLoad = 1 [default = 0]; - // Percent JVM heap occupancy. Guaranteed to be positive, between 0 and 100. 
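The PrepareBulkLoad, BulkLoadHFile and CleanupBulkLoad messages above are driven end to end by the bulk-load tool. A sketch assuming the BulkLoadHFiles API available from HBase 2.2 onward; the table name and staging path are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  static void load(Configuration conf) throws Exception {
    // PrepareBulkLoad, then one BulkLoadHFileRequest per region, then CleanupBulkLoad,
    // all handled by the tool; /staging/t1 holds per-family HFile directories.
    BulkLoadHFiles.create(conf).bulkLoad(TableName.valueOf("t1"), new Path("/staging/t1"));
  }
}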
- // We can move this to "ServerLoadStats" should we develop them. - optional int32 heapOccupancy = 2 [default = 0]; - // Compaction pressure. Guaranteed to be positive, between 0 and 100. - optional int32 compactionPressure = 3 [default = 0]; -} - -message MultiRegionLoadStats{ - repeated RegionSpecifier region = 1; - repeated RegionLoadStats stat = 2; -} - -/** - * Either a Result or an Exception NameBytesPair (keyed by - * exception name whose value is the exception stringified) - * or maybe empty if no result and no exception. - */ -message ResultOrException { - // If part of a multi call, save original index of the list of all - // passed so can align this response w/ original request. - optional uint32 index = 1; - optional Result result = 2; - optional NameBytesPair exception = 3; - // result if this was a coprocessor service call - optional CoprocessorServiceResult service_result = 4; - // current load on the region - optional RegionLoadStats loadStats = 5 [deprecated=true]; -} - -/** - * The result of a RegionAction. - */ -message RegionActionResult { - repeated ResultOrException resultOrException = 1; - // If the operation failed globally for this region, this exception is set - optional NameBytesPair exception = 2; -} - -/** - * Execute a list of actions on a given region in order. - * Nothing prevents a request to contains a set of RegionAction on the same region. - * For this reason, the matching between the MultiRequest and the MultiResponse is not - * done by the region specifier but by keeping the order of the RegionActionResult vs. - * the order of the RegionAction. - */ -message MultiRequest { - repeated RegionAction regionAction = 1; - optional uint64 nonceGroup = 2; - optional Condition condition = 3; -} - -message MultiResponse { - repeated RegionActionResult regionActionResult = 1; - // used for mutate to indicate processed only - optional bool processed = 2; - optional MultiRegionLoadStats regionStatistics = 3; -} - - -service ClientService { - rpc Get(GetRequest) - returns(GetResponse); - - rpc Mutate(MutateRequest) - returns(MutateResponse); - - rpc Scan(ScanRequest) - returns(ScanResponse); - - rpc BulkLoadHFile(BulkLoadHFileRequest) - returns(BulkLoadHFileResponse); - - rpc PrepareBulkLoad(PrepareBulkLoadRequest) - returns (PrepareBulkLoadResponse); - - rpc CleanupBulkLoad(CleanupBulkLoadRequest) - returns (CleanupBulkLoadResponse); - - rpc ExecService(CoprocessorServiceRequest) - returns(CoprocessorServiceResponse); - - rpc ExecRegionServerService(CoprocessorServiceRequest) - returns(CoprocessorServiceResponse); - - rpc Multi(MultiRequest) - returns(MultiResponse); -} diff --git a/hbase-protocol/src/main/protobuf/ClusterId.proto b/hbase-protocol/src/main/protobuf/ClusterId.proto deleted file mode 100644 index 8d916a690fd..00000000000 --- a/hbase-protocol/src/main/protobuf/ClusterId.proto +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
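A MultiRequest as defined above is what Table.batch produces: each Put, Get or Delete becomes an Action inside a RegionAction, and each slot of the output array receives the matching Result or error, keyed by position like ResultOrException.index. A hedged sketch:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchSketch {
  static void mixedBatch(Table table) throws Exception {
    List<Row> actions = new ArrayList<>();
    actions.add(new Put(Bytes.toBytes("r1"))
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    actions.add(new Get(Bytes.toBytes("r2")));
    actions.add(new Delete(Bytes.toBytes("r3")));
    Object[] results = new Object[actions.size()];
    // Index i of results matches actions.get(i) once the call returns.
    table.batch(actions, results);
  }
}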
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers that are shared throughout HBase -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "ClusterIdProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -/** - * Content of the '/hbase/hbaseid', cluster id, znode. - * Also cluster of the ${HBASE_ROOTDIR}/hbase.id file. - */ -message ClusterId { - // This is the cluster id, a uuid as a String - required string cluster_id = 1; -} diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto deleted file mode 100644 index a41ac7113c4..00000000000 --- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto +++ /dev/null @@ -1,283 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -syntax = "proto2"; - -// This file contains protocol buffers that are used for ClustStatus -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "ClusterStatusProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; -import "ClusterId.proto"; -import "FS.proto"; - -message RegionState { - required RegionInfo region_info = 1; - required State state = 2; - optional uint64 stamp = 3; - enum State { - OFFLINE = 0; // region is in an offline state - PENDING_OPEN = 1; // sent rpc to server to open but has not begun - OPENING = 2; // server has begun to open but not yet done - OPEN = 3; // server opened region and updated meta - PENDING_CLOSE = 4; // sent rpc to server to close but has not begun - CLOSING = 5; // server has begun to close but not yet done - CLOSED = 6; // server closed region and updated meta - SPLITTING = 7; // server started split of a region - SPLIT = 8; // server completed split of a region - FAILED_OPEN = 9; // failed to open, and won't retry any more - FAILED_CLOSE = 10; // failed to close, and won't retry any more - MERGING = 11; // server started merge a region - MERGED = 12; // server completed merge of a region - SPLITTING_NEW = 13; // new region to be created when RS splits a parent - // region but hasn't be created yet, or master doesn't - // know it's already created - MERGING_NEW = 14; // new region to be created when RS merges two - // daughter regions but hasn't be created yet, or - // master doesn't know it's already created - } -} - -message RegionInTransition { - required RegionSpecifier spec = 1; - required RegionState region_state = 2; -} - -/** - * sequence Id of a store - */ -message StoreSequenceId { - required bytes family_name = 1; - required uint64 sequence_id = 2; -} - -/** - * contains a sequence id of a region which should be the minimum of its store sequence ids and - * list of sequence ids of the region's stores - */ -message RegionStoreSequenceIds { - required uint64 last_flushed_sequence_id = 1; - repeated StoreSequenceId store_sequence_id = 2; -} - -message RegionLoad { - /** the region specifier */ - required RegionSpecifier region_specifier = 1; - - /** the number of stores for the region */ - optional uint32 stores = 2; - - /** the number of storefiles for the region */ - optional uint32 storefiles = 3; - - /** the total size of the store files for the region, uncompressed, in MB */ - optional uint32 store_uncompressed_size_MB = 4; - - /** the current total size of the store files for the region, in MB */ - optional uint32 storefile_size_MB = 5; - - /** the current size of the memstore for the region, in MB */ - optional uint32 memstore_size_MB = 6; - - /** - * The current total size of root-level store file indexes for the region, - * in KB. The same as {@link #rootIndexSizeKB}. - */ - optional uint64 storefile_index_size_KB = 7; - - /** the current total read requests made to region */ - optional uint64 read_requests_count = 8; - - /** the current total write requests made to region */ - optional uint64 write_requests_count = 9; - - /** the total compacting key values in currently running compaction */ - optional uint64 total_compacting_KVs = 10; - - /** the completed count of key values in currently running compaction */ - optional uint64 current_compacted_KVs = 11; - - /** The current total size of root-level indexes for the region, in KB. 
*/ - optional uint32 root_index_size_KB = 12; - - /** The total size of all index blocks, not just the root level, in KB. */ - optional uint32 total_static_index_size_KB = 13; - - /** - * The total size of all Bloom filter blocks, not just loaded into the - * block cache, in KB. - */ - optional uint32 total_static_bloom_size_KB = 14; - - /** the most recent sequence Id from cache flush */ - optional uint64 complete_sequence_id = 15; - - /** The current data locality for region in the regionserver */ - optional float data_locality = 16; - - optional uint64 last_major_compaction_ts = 17 [default = 0]; - - /** the most recent sequence Id of store from cache flush */ - repeated StoreSequenceId store_complete_sequence_id = 18; - - /** the current total filtered read requests made to region */ - optional uint64 filtered_read_requests_count = 19; - - /** the current total coprocessor requests made to region */ - optional uint64 cp_requests_count = 20; - - /** the number of references active on the store */ - optional int32 store_ref_count = 21 [default = 0]; - - /** - * The max number of references active on single store file among all compacted store files - * that belong to given region - */ - optional int32 max_compacted_store_file_ref_count = 22 [default = 0]; -} - -message UserLoad { - - /** short user name */ - required string userName = 1; - - /** Metrics for all clients of a user */ - repeated ClientMetrics clientMetrics = 2; - - -} - -message ClientMetrics { - /** client host name */ - required string hostName = 1; - - /** the current total read requests made from a client */ - optional uint64 read_requests_count = 2; - - /** the current total write requests made from a client */ - optional uint64 write_requests_count = 3; - - /** the current total filtered requests made from a client */ - optional uint64 filtered_requests_count = 4; - -} - -/* Server-level protobufs */ - -message ReplicationLoadSink { - required uint64 ageOfLastAppliedOp = 1; - required uint64 timeStampsOfLastAppliedOp = 2; -} - -message ReplicationLoadSource { - required string peerID = 1; - required uint64 ageOfLastShippedOp = 2; - required uint32 sizeOfLogQueue = 3; - required uint64 timeStampOfLastShippedOp = 4; - required uint64 replicationLag = 5; -} - -message ServerLoad { - /** Number of requests since last report. */ - optional uint64 number_of_requests = 1; - - /** Total Number of requests from the start of the region server. */ - optional uint64 total_number_of_requests = 2; - - /** the amount of used heap, in MB. */ - optional uint32 used_heap_MB = 3; - - /** the maximum allowable size of the heap, in MB. */ - optional uint32 max_heap_MB = 4; - - /** Information on the load of individual regions. */ - repeated RegionLoad region_loads = 5; - - /** - * Regionserver-level coprocessors, e.g., WALObserver implementations. - * Region-level coprocessors, on the other hand, are stored inside RegionLoad - * objects. - */ - repeated Coprocessor coprocessors = 6; - - /** - * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests) - * time is measured as the difference, measured in milliseconds, between the current time - * and midnight, January 1, 1970 UTC. - */ - optional uint64 report_start_time = 7; - - /** - * Time when report was generated. - * time is measured as the difference, measured in milliseconds, between the current time - * and midnight, January 1, 1970 UTC. 
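The RegionLoad and ServerLoad data modeled above reaches clients as ClusterMetrics. A sketch, assuming the Admin.getClusterMetrics API and its Option enum; the printed fields are illustrative:

import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public class ClusterMetricsSketch {
  static void dump(Admin admin) throws Exception {
    ClusterMetrics metrics = admin.getClusterMetrics(
        EnumSet.of(ClusterMetrics.Option.MASTER, ClusterMetrics.Option.LIVE_SERVERS));
    System.out.println("master: " + metrics.getMasterName());
    for (Map.Entry<ServerName, ServerMetrics> e : metrics.getLiveServerMetrics().entrySet()) {
      // ServerMetrics corresponds to ServerLoad; per-region data to RegionLoad.
      System.out.println(e.getKey() + " regions=" + e.getValue().getRegionMetrics().size());
    }
  }
}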
- */ - optional uint64 report_end_time = 8; - - /** - * The port number that this region server is hosing an info server on. - */ - optional uint32 info_server_port = 9; - - /** - * The replicationLoadSource for the replication Source status of this region server. - */ - repeated ReplicationLoadSource replLoadSource = 10; - - /** - * The replicationLoadSink for the replication Sink status of this region server. - */ - optional ReplicationLoadSink replLoadSink = 11; - - /** - * The metrics for each user on this region server - */ - repeated UserLoad userLoads = 12; -} - -message LiveServerInfo { - required ServerName server = 1; - required ServerLoad server_load = 2; -} - -message ClusterStatus { - optional HBaseVersionFileContent hbase_version = 1; - repeated LiveServerInfo live_servers = 2; - repeated ServerName dead_servers = 3; - repeated RegionInTransition regions_in_transition = 4; - optional ClusterId cluster_id = 5; - repeated Coprocessor master_coprocessors = 6; - optional ServerName master = 7; - repeated ServerName backup_masters = 8; - optional bool balancer_on = 9; -} - -enum Option { - HBASE_VERSION = 0; - CLUSTER_ID = 1; - LIVE_SERVERS = 2; - DEAD_SERVERS = 3; - MASTER = 4; - BACKUP_MASTERS = 5; - MASTER_COPROCESSORS = 6; - REGIONS_IN_TRANSITION = 7; - BALANCER_ON = 8; -} diff --git a/hbase-protocol/src/main/protobuf/Comparator.proto b/hbase-protocol/src/main/protobuf/Comparator.proto deleted file mode 100644 index 0a59cf3874b..00000000000 --- a/hbase-protocol/src/main/protobuf/Comparator.proto +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers that are used for filters -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "ComparatorProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -// This file contains protocol buffers that are used for comparators (e.g. 
in filters) - -message Comparator { - required string name = 1; - optional bytes serialized_comparator = 2; -} - -message ByteArrayComparable { - optional bytes value = 1; -} - -message BinaryComparator { - required ByteArrayComparable comparable = 1; -} - -message LongComparator { - required ByteArrayComparable comparable = 1; -} - -message BinaryPrefixComparator { - required ByteArrayComparable comparable = 1; -} - -message BitComparator { - required ByteArrayComparable comparable = 1; - required BitwiseOp bitwise_op = 2; - - enum BitwiseOp { - AND = 1; - OR = 2; - XOR = 3; - } -} - -message NullComparator { -} - -message RegexStringComparator { - required string pattern = 1; - required int32 pattern_flags = 2; - required string charset = 3; - optional string engine = 4; -} - -message SubstringComparator { - required string substr = 1; -} - -message BigDecimalComparator { - required ByteArrayComparable comparable = 1; -} - -message BinaryComponentComparator { - required bytes value = 1; - required uint32 offset = 2; -} diff --git a/hbase-protocol/src/main/protobuf/Encryption.proto b/hbase-protocol/src/main/protobuf/Encryption.proto deleted file mode 100644 index e08ca2b0481..00000000000 --- a/hbase-protocol/src/main/protobuf/Encryption.proto +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers used for encryption -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "EncryptionProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -message WrappedKey { - required string algorithm = 1; - required uint32 length = 2; - required bytes data = 3; - optional bytes iv = 4; - optional bytes hash = 5; -} diff --git a/hbase-protocol/src/main/protobuf/ErrorHandling.proto b/hbase-protocol/src/main/protobuf/ErrorHandling.proto deleted file mode 100644 index c57a6d25250..00000000000 --- a/hbase-protocol/src/main/protobuf/ErrorHandling.proto +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
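Each comparator message above is paired with a client-side class whose name and serialized form fill the generic Comparator {name, serialized_comparator} envelope. A brief sketch with RegexStringComparator; the pattern is a placeholder:

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;

public class ComparatorSketch {
  static Scan rowsMatching() {
    // Serializes as a RowFilter {compare_filter} wrapping a Comparator whose
    // serialized_comparator is a RegexStringComparator {pattern, flags, charset}.
    return new Scan().setFilter(
        new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("user-\\d+")));
  }
}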
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers that are used for error handling -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "ErrorHandlingProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -/** - * Protobuf version of a java.lang.StackTraceElement - * so we can serialize exceptions. - */ -message StackTraceElementMessage { - optional string declaring_class = 1; - optional string method_name = 2; - optional string file_name = 3; - optional int32 line_number = 4; -} - -/** - * Cause of a remote failure for a generic exception. Contains - * all the information for a generic exception as well as - * optional info about the error for generic info passing - * (which should be another protobuffed class). - */ -message GenericExceptionMessage { - optional string class_name = 1; - optional string message = 2; - optional bytes error_info = 3; - repeated StackTraceElementMessage trace = 4; -} - -/** - * Exception sent across the wire when a remote task needs - * to notify other tasks that it failed and why - */ -message ForeignExceptionMessage { - optional string source = 1; - optional GenericExceptionMessage generic_exception = 2; -} diff --git a/hbase-protocol/src/main/protobuf/FS.proto b/hbase-protocol/src/main/protobuf/FS.proto deleted file mode 100644 index 090617972fa..00000000000 --- a/hbase-protocol/src/main/protobuf/FS.proto +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers that are written into the filesystem -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "FSProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -/** - * The ${HBASE_ROOTDIR}/hbase.version file content - */ -message HBaseVersionFileContent { - required string version = 1; -} - -/** - * Reference file content used when we split an hfile under a region. - */ -message Reference { - required bytes splitkey = 1; - enum Range { - TOP = 0; - BOTTOM = 1; - } - required Range range = 2; -} - diff --git a/hbase-protocol/src/main/protobuf/Filter.proto b/hbase-protocol/src/main/protobuf/Filter.proto deleted file mode 100644 index 0b4c2013f4c..00000000000 --- a/hbase-protocol/src/main/protobuf/Filter.proto +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers that are used for filters -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "FilterProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; -import "Comparator.proto"; - -message Filter { - required string name = 1; - optional bytes serialized_filter = 2; -} - -message ColumnCountGetFilter { - required int32 limit = 1; -} - -message ColumnPaginationFilter { - required int32 limit = 1; - optional int32 offset = 2; - optional bytes column_offset = 3; -} - -message ColumnPrefixFilter { - required bytes prefix = 1; -} - -message ColumnRangeFilter { - optional bytes min_column = 1; - optional bool min_column_inclusive = 2; - optional bytes max_column = 3; - optional bool max_column_inclusive = 4; -} - -message CompareFilter { - required CompareType compare_op = 1; - optional Comparator comparator = 2; -} - -message DependentColumnFilter { - required CompareFilter compare_filter = 1; - optional bytes column_family = 2; - optional bytes column_qualifier = 3; - optional bool drop_dependent_column = 4; -} - -message FamilyFilter { - required CompareFilter compare_filter = 1; -} - -message FilterList { - required Operator operator = 1; - repeated Filter filters = 2; - - enum Operator { - MUST_PASS_ALL = 1; - MUST_PASS_ONE = 2; - } -} - -message FilterWrapper { - required Filter filter = 1; -} - -message FirstKeyOnlyFilter { -} - -message FirstKeyValueMatchingQualifiersFilter { - repeated bytes qualifiers = 1; -} - -message FuzzyRowFilter { - repeated BytesBytesPair fuzzy_keys_data = 1; -} - -message InclusiveStopFilter { - optional bytes stop_row_key = 1; -} - -message KeyOnlyFilter { - required bool len_as_val = 1; -} - -message MultipleColumnPrefixFilter { - repeated bytes sorted_prefixes = 1; -} - -message PageFilter { - required int64 page_size = 1; -} - -message PrefixFilter { - optional bytes prefix = 1; -} - -message QualifierFilter { - required CompareFilter compare_filter = 1; -} - -message RandomRowFilter { - required float chance = 1; -} - -message RowFilter { - required CompareFilter compare_filter = 1; -} - -message SingleColumnValueExcludeFilter { - required SingleColumnValueFilter single_column_value_filter = 1; -} - -message SingleColumnValueFilter { - optional bytes column_family = 1; - optional bytes column_qualifier = 2; - required CompareType compare_op = 3; - required Comparator comparator = 4; - optional bool filter_if_missing = 5; - optional bool latest_version_only = 6; -} - -message SkipFilter { - required Filter filter = 1; -} - -message TimestampsFilter { - repeated int64 timestamps = 1 [packed=true]; - optional bool can_hint = 2; -} - -message ValueFilter { - 
required CompareFilter compare_filter = 1;
-}
-
-message WhileMatchFilter {
- required Filter filter = 1;
-}
-message FilterAllFilter {
-}
-
-message RowRange {
- optional bytes start_row = 1;
- optional bool start_row_inclusive = 2;
- optional bytes stop_row = 3;
- optional bool stop_row_inclusive =4;
-}
-
-message MultiRowRangeFilter {
- repeated RowRange row_range_list = 1;
-}
-
-message ColumnValueFilter {
- required bytes family = 1;
- required bytes qualifier = 2;
- required CompareType compare_op = 3;
- required Comparator comparator = 4;
-}
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
deleted file mode 100644
index f324aae7bb0..00000000000
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ /dev/null
@@ -1,254 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-syntax = "proto2";
-
-// This file contains protocol buffers that are shared throughout HBase
-package hbase.pb;
-
-option java_package = "org.apache.hadoop.hbase.protobuf.generated";
-option java_outer_classname = "HBaseProtos";
-option java_generate_equals_and_hash = true;
-option optimize_for = SPEED;
-
-
-/**
- * Table Name
- */
-message TableName {
- required bytes namespace = 1;
- required bytes qualifier = 2;
-}
-
-/**
- * Table Schema
- * Inspired by the rest TableSchema
- */
-message TableSchema {
- optional TableName table_name = 1;
- repeated BytesBytesPair attributes = 2;
- repeated ColumnFamilySchema column_families = 3;
- repeated NameStringPair configuration = 4;
-}
-
-/** Denotes state of the table */
-message TableState {
- // Table's current state
- enum State {
- ENABLED = 0;
- DISABLED = 1;
- DISABLING = 2;
- ENABLING = 3;
- }
- // This is the table's state.
- required State state = 1;
-}
-
-/**
- * Column Family Schema
- * Inspired by the rest ColumSchemaMessage
- */
-message ColumnFamilySchema {
- required bytes name = 1;
- repeated BytesBytesPair attributes = 2;
- repeated NameStringPair configuration = 3;
-}
-
-/**
- * Protocol buffer version of HRegionInfo.
- */
-message RegionInfo {
- required uint64 region_id = 1;
- required TableName table_name = 2;
- optional bytes start_key = 3;
- optional bytes end_key = 4;
- optional bool offline = 5;
- optional bool split = 6;
- optional int32 replica_id = 7 [default = 0];
-}
-
-/**
- * Protocol buffer for favored nodes
- */
-message FavoredNodes {
- repeated ServerName favored_node = 1;
-}
-
-/**
- * Container protocol buffer to specify a region.
- * You can specify region by region name, or the hash
- * of the region name, which is known as encoded
- * region name.
- */
-message RegionSpecifier {
- required RegionSpecifierType type = 1;
- required bytes value = 2;
-
- enum RegionSpecifierType {
- // <tablename>,<startkey>,<regionId>.<encodedName>
- REGION_NAME = 1;
-
- // hash of <tablename>,<startkey>,<regionId>
- ENCODED_REGION_NAME = 2;
- }
-}
-
-/**
- * A range of time. Both from and to are Java time
- * stamp in milliseconds. If you don't specify a time
- * range, it means all time. By default, if not
- * specified, from = 0, and to = Long.MAX_VALUE
- */
-message TimeRange {
- optional uint64 from = 1;
- optional uint64 to = 2;
-}
-
-/* ColumnFamily Specific TimeRange */
-message ColumnFamilyTimeRange {
- required bytes column_family = 1;
- required TimeRange time_range = 2;
-}
-
-/* Comparison operators */
-enum CompareType {
- LESS = 0;
- LESS_OR_EQUAL = 1;
- EQUAL = 2;
- NOT_EQUAL = 3;
- GREATER_OR_EQUAL = 4;
- GREATER = 5;
- NO_OP = 6;
-}
-
-/**
- * Protocol buffer version of ServerName
- */
-message ServerName {
- required string host_name = 1;
- optional uint32 port = 2;
- optional uint64 start_code = 3;
-}
-
-// Comment data structures
-
-message Coprocessor {
- required string name = 1;
-}
-
-message NameStringPair {
- required string name = 1;
- required string value = 2;
-}
-
-message NameBytesPair {
- required string name = 1;
- optional bytes value = 2;
-}
-
-message BytesBytesPair {
- required bytes first = 1;
- required bytes second = 2;
-}
-
-message NameInt64Pair {
- optional string name = 1;
- optional int64 value = 2;
-}
-
-/**
- * Description of the snapshot to take
- */
-message SnapshotDescription {
- required string name = 1;
- optional string table = 2; // not needed for delete, but checked for in taking snapshot
- optional int64 creation_time = 3 [default = 0];
- enum Type {
- DISABLED = 0;
- FLUSH = 1;
- SKIPFLUSH = 2;
- }
- optional Type type = 4 [default = FLUSH];
- optional int32 version = 5;
- optional string owner = 6;
- optional int64 ttl = 7 [default = 0];
-}
-
-/**
- * Description of the distributed procedure to take
- */
-message ProcedureDescription {
- required string signature = 1; // the unique signature of the procedure
- optional string instance = 2; // the procedure instance name
- optional int64 creation_time = 3 [default = 0];
- repeated NameStringPair configuration = 4;
-}
-
-message EmptyMsg {
-}
-
-enum TimeUnit {
- NANOSECONDS = 1;
- MICROSECONDS = 2;
- MILLISECONDS = 3;
- SECONDS = 4;
- MINUTES = 5;
- HOURS = 6;
- DAYS = 7;
-}
-
-message LongMsg {
- required int64 long_msg = 1;
-}
-
-message DoubleMsg {
- required double double_msg = 1;
-}
-
-message BigDecimalMsg {
- required bytes bigdecimal_msg = 1;
-}
-
-message UUID {
- required uint64 least_sig_bits = 1;
- required uint64 most_sig_bits = 2;
-}
-
-message NamespaceDescriptor {
- required bytes name = 1;
- repeated NameStringPair configuration = 2;
-}
-
-// Rpc client version info proto. Included in ConnectionHeader on connection setup
-message VersionInfo {
- required string version = 1;
- required string url = 2;
- required string revision = 3;
- required string user = 4;
- required string date = 5;
- required string src_checksum = 6;
- optional uint32 version_major = 7;
- optional uint32 version_minor = 8;
-}
-
-/**
- * Description of the region server info
- */
-message RegionServerInfo {
- optional int32 infoPort = 1;
- optional VersionInfo version_info = 2;
-}
diff --git a/hbase-protocol/src/main/protobuf/HFile.proto b/hbase-protocol/src/main/protobuf/HFile.proto
deleted file mode 100644
index df8f57ee238..00000000000
--- a/hbase-protocol/src/main/protobuf/HFile.proto
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.
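The TableName and TimeRange messages above map one-to-one onto client classes. A sketch; per the comment above, an unspecified TimeRange means all time, and the client-side range is from-inclusive, to-exclusive. Names are placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class CommonTypesSketch {
  static Get boundedRead() throws Exception {
    TableName tn = TableName.valueOf("myns", "mytable"); // TableName {namespace, qualifier}
    System.out.println("reading from " + tn);
    Get get = new Get(Bytes.toBytes("r1"));
    get.setTimeRange(0L, System.currentTimeMillis());    // TimeRange {from, to}
    return get;
  }
}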
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "HFileProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; - -// Map of name/values -message FileInfoProto { - repeated BytesBytesPair map_entry = 1; -} - -// HFile file trailer -message FileTrailerProto { - optional uint64 file_info_offset = 1; - optional uint64 load_on_open_data_offset = 2; - optional uint64 uncompressed_data_index_size = 3; - optional uint64 total_uncompressed_bytes = 4; - optional uint32 data_index_count = 5; - optional uint32 meta_index_count = 6; - optional uint64 entry_count = 7; - optional uint32 num_data_index_levels = 8; - optional uint64 first_data_block_offset = 9; - optional uint64 last_data_block_offset = 10; - optional string comparator_class_name = 11; - optional uint32 compression_codec = 12; - optional bytes encryption_key = 13; -} diff --git a/hbase-protocol/src/main/protobuf/LoadBalancer.proto b/hbase-protocol/src/main/protobuf/LoadBalancer.proto deleted file mode 100644 index 0c0882170c7..00000000000 --- a/hbase-protocol/src/main/protobuf/LoadBalancer.proto +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// This file contains protocol buffers to represent the state of the load balancer. 
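(For context, a hedged Java sketch of how the one-field LoadBalancerState payload below was typically decoded from the balancer znode; it assumes the generated LoadBalancerProtos class this diff deletes and a ProtobufUtil.lengthOfPBMagic() helper for HBase's four-byte 'PBUF' znode prefix, with hypothetical variable names.)

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.LoadBalancerProtos.LoadBalancerState;

public final class BalancerStateSketch {
  // Decodes a balancer znode payload; treating an absent field as "balancer on"
  // is an assumption of this sketch, not a guarantee of the original code.
  static boolean isBalancerOn(byte[] znodeData) throws IOException {
    // Drop the four-byte 'PBUF' magic that precedes serialized znode content.
    byte[] pb = Arrays.copyOfRange(znodeData, ProtobufUtil.lengthOfPBMagic(), znodeData.length);
    LoadBalancerState state = LoadBalancerState.parseFrom(pb);
    return !state.hasBalancerOn() || state.getBalancerOn();
  }
}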
-package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "LoadBalancerProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -message LoadBalancerState { - optional bool balancer_on = 1; -} diff --git a/hbase-protocol/src/main/protobuf/MapReduce.proto b/hbase-protocol/src/main/protobuf/MapReduce.proto deleted file mode 100644 index 7fb6850539f..00000000000 --- a/hbase-protocol/src/main/protobuf/MapReduce.proto +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - - //This file includes protocol buffers used in MapReduce only. -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "MapReduceProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; - -message ScanMetrics { - repeated NameInt64Pair metrics = 1; -} - -message TableSnapshotRegionSplit { - repeated string locations = 2; - optional TableSchema table = 3; - optional RegionInfo region = 4; -} diff --git a/hbase-protocol/src/main/protobuf/MultiRowMutation.proto b/hbase-protocol/src/main/protobuf/MultiRowMutation.proto deleted file mode 100644 index d3140e9b2e0..00000000000 --- a/hbase-protocol/src/main/protobuf/MultiRowMutation.proto +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -syntax = "proto2"; -package hbase.pb; - -import "Client.proto"; -import "HBase.proto"; -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "MultiRowMutationProtos"; -option java_generate_equals_and_hash = true; -option java_generic_services = true; -option optimize_for = SPEED; - -message MultiRowMutationProcessorRequest{ -} - -message MultiRowMutationProcessorResponse{ -} - -message MutateRowsRequest { - repeated MutationProto mutation_request = 1; - optional uint64 nonce_group = 2; - optional uint64 nonce = 3; - optional RegionSpecifier region = 4; -} - -message MutateRowsResponse { -} - -service MultiRowMutationService { - rpc MutateRows(MutateRowsRequest) - returns(MutateRowsResponse); -} diff --git a/hbase-protocol/src/main/protobuf/PingProtocol.proto b/hbase-protocol/src/main/protobuf/PingProtocol.proto deleted file mode 100644 index 9a645994886..00000000000 --- a/hbase-protocol/src/main/protobuf/PingProtocol.proto +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// Coprocessor test -option java_package = "org.apache.hadoop.hbase.coprocessor.protobuf.generated"; -option java_outer_classname = "PingProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; - -message PingRequest { -} - -message PingResponse { - required string pong = 1; -} - -message CountRequest { -} - -message CountResponse { - required int32 count = 1; -} - -message IncrementCountRequest { - required int32 diff = 1; -} - -message IncrementCountResponse { - required int32 count = 1; -} - -message HelloRequest { - optional string name = 1; -} - -message HelloResponse { - optional string response = 1; -} - -message NoopRequest { -} - -message NoopResponse { -} - -service PingService { - rpc ping(PingRequest) returns(PingResponse); - rpc count(CountRequest) returns(CountResponse); - rpc increment(IncrementCountRequest) returns(IncrementCountResponse); - rpc hello(HelloRequest) returns(HelloResponse); - rpc noop(NoopRequest) returns(NoopResponse); -} diff --git a/hbase-protocol/src/main/protobuf/Quota.proto b/hbase-protocol/src/main/protobuf/Quota.proto deleted file mode 100644 index fa5462c5f1b..00000000000 --- a/hbase-protocol/src/main/protobuf/Quota.proto +++ /dev/null @@ -1,113 +0,0 @@ - /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "QuotaProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; - -enum QuotaScope { - CLUSTER = 1; - MACHINE = 2; -} - -message TimedQuota { - required TimeUnit time_unit = 1; - optional uint64 soft_limit = 2; - optional float share = 3; - optional QuotaScope scope = 4 [default = MACHINE]; -} - -enum ThrottleType { - REQUEST_NUMBER = 1; - REQUEST_SIZE = 2; - WRITE_NUMBER = 3; - WRITE_SIZE = 4; - READ_NUMBER = 5; - READ_SIZE = 6; -} - -message Throttle { - optional TimedQuota req_num = 1; - optional TimedQuota req_size = 2; - - optional TimedQuota write_num = 3; - optional TimedQuota write_size = 4; - - optional TimedQuota read_num = 5; - optional TimedQuota read_size = 6; -} - -message ThrottleRequest { - optional ThrottleType type = 1; - optional TimedQuota timed_quota = 2; -} - -enum QuotaType { - THROTTLE = 1; - SPACE = 2; -} - -message Quotas { - optional bool bypass_globals = 1 [default = false]; - optional Throttle throttle = 2; - optional SpaceQuota space = 3; -} - -message QuotaUsage { -} - -// Defines what action should be taken when the SpaceQuota is violated -enum SpaceViolationPolicy { - DISABLE = 1; // Disable the table(s) - NO_WRITES_COMPACTIONS = 2; // No writes, bulk-loads, or compactions - NO_WRITES = 3; // No writes or bulk-loads - NO_INSERTS = 4; // No puts or bulk-loads, but deletes are allowed -} - -// Defines a limit on the amount of filesystem space used by a table/namespace -message SpaceQuota { - optional uint64 soft_limit = 1; // The limit of bytes for this quota - optional SpaceViolationPolicy violation_policy = 2; // The action to take when the quota is violated - optional bool remove = 3 [default = false]; // When true, remove the quota. -} - -// The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota). -message SpaceLimitRequest { - optional SpaceQuota quota = 1; -} - -// Represents the state of a quota on a table. Either the quota is not in violation, -// or it is in violation and there is a violation policy which should be in effect. -message SpaceQuotaStatus { - optional SpaceViolationPolicy violation_policy = 1; - optional bool in_violation = 2; -} - -// Message stored in the value of hbase:quota table to denote the status of a table WRT -// the quota applicable to it. -message SpaceQuotaSnapshot { - optional SpaceQuotaStatus quota_status = 1; - optional uint64 quota_usage = 2; - optional uint64 quota_limit = 3; -} diff --git a/hbase-protocol/src/main/protobuf/RPC.proto b/hbase-protocol/src/main/protobuf/RPC.proto deleted file mode 100644 index 25e051430e2..00000000000 --- a/hbase-protocol/src/main/protobuf/RPC.proto +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -import "Tracing.proto"; -import "HBase.proto"; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "RPCProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -// See https://issues.apache.org/jira/browse/HBASE-7898 for high-level -// description of RPC specification. -// -// On connection setup, the client sends six bytes of preamble -- a four -// byte magic, a byte of version, and a byte of authentication type. -// -// We then send a "ConnectionHeader" protobuf of user information and the -// 'protocol' or 'service' that is to be run over this connection as well as -// info such as codecs and compression to use when we send cell blocks (see below). -// This connection header protobuf is prefaced by an int that holds the length -// of this connection header (this is NOT a varint). The pb connection header -// is sent with Message#writeTo. The server throws an exception if it doesn't -// like what it was sent, noting what it is objecting to. Otherwise, the server -// says nothing and is open for business. -// -// Hereafter the client makes requests and the server returns responses. -// -// Requests look like this: -// -// <An int with the total length of the request> -// <RequestHeader Message written out using Message#writeDelimitedTo> -// <Optionally a Request Parameter Message written out using Message#writeDelimitedTo> -// <Optionally a Cell block> -// -// ...where the Request Parameter Message is whatever the method name stipulated -// in the RequestHeader expects; e.g. if the method is a scan, then the pb -// Request Message is a GetRequest, or a ScanRequest. A block of Cells -// optionally follows. The presence of a Request param Message and/or a -// block of Cells will be noted in the RequestHeader. -// -// Response is the mirror of the request: -// -// <An int with the total length of the response> -// <ResponseHeader Message written out using Message#writeDelimitedTo> -// <Optionally a Response Result Message written out using Message#writeDelimitedTo> -// <Optionally a Cell block> -// -// ...where the Response Message is the response type that goes with the -// method specified when making the request, and the follow-on Cell blocks may -// or may not be there -- read the response header to find out if one is following. -// If an exception, it will be included inside the Response Header. -// -// Any time we write a pb, we do it with Message#writeDelimitedTo EXCEPT when -// the connection header is sent; this is prefaced by an int with its length -// and the pb connection header is then written with Message#writeTo. -// - -// User Information proto. Included in ConnectionHeader on connection setup -message UserInformation { - required string effective_user = 1; - optional string real_user = 2; -} - -// This is sent on connection setup after the connection preamble is sent. -message ConnectionHeader { - optional UserInformation user_info = 1; - optional string service_name = 2; - // Cell block codec we will use sending over optional cell blocks. Server throws exception - // if cannot deal. Null means no codec'ing going on so we are pb all the time (SLOW!!!) - optional string cell_block_codec_class = 3; - // Compressor we will use if cell block is compressed. Server will throw exception if not supported.
- // Class must implement hadoop's CompressionCodec Interface. Can't compress if no codec. - optional string cell_block_compressor_class = 4; - optional VersionInfo version_info = 5; -} - -// Optional Cell block Message. Included in client RequestHeader -message CellBlockMeta { - // Length of the following cell block. Could calculate it but convenient having it to hand. - optional uint32 length = 1; -} - -// At the RPC layer, this message is used to carry -// the server side exception to the RPC client. -message ExceptionResponse { - // Class name of the exception thrown from the server - optional string exception_class_name = 1; - // Exception stack trace from the server side - optional string stack_trace = 2; - // Optional hostname. Filled in for some exceptions such as region moved - // where exception gives clue on where the region may have moved. - optional string hostname = 3; - optional int32 port = 4; - // Set if we are NOT to retry on receipt of this exception - optional bool do_not_retry = 5; -} - -// Header sent making a request. -message RequestHeader { - // Monotonically increasing call_id to keep track of RPC requests and their responses - optional uint32 call_id = 1; - optional RPCTInfo trace_info = 2; - optional string method_name = 3; - // If true, then a pb Message param follows. - optional bool request_param = 4; - // If present, then an encoded data block follows. - optional CellBlockMeta cell_block_meta = 5; - // 0 is NORMAL priority. 200 is HIGH. If no priority, treat it as NORMAL. - // See HConstants. - optional uint32 priority = 6; - optional uint32 timeout = 7; -} - -message ResponseHeader { - optional uint32 call_id = 1; - // If present, then request threw an exception and no response message (else we presume one) - optional ExceptionResponse exception = 2; - // If present, then an encoded data block follows. - optional CellBlockMeta cell_block_meta = 3; -} diff --git a/hbase-protocol/src/main/protobuf/RSGroup.proto b/hbase-protocol/src/main/protobuf/RSGroup.proto deleted file mode 100644 index 31a7716dfd7..00000000000 --- a/hbase-protocol/src/main/protobuf/RSGroup.proto +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "RSGroupProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; - -message RSGroupInfo { - required string name = 1; - repeated ServerName servers = 4; - repeated TableName tables = 3; -} - diff --git a/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto deleted file mode 100644 index b73e370fb2f..00000000000 --- a/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "RSGroupAdminProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; -import "RSGroup.proto"; - -/** Group level protobufs */ - -message ListTablesOfRSGroupRequest { - required string r_s_group_name = 1; -} - -message ListTablesOfRSGroupResponse { - repeated TableName table_name = 1; -} - -message GetRSGroupInfoRequest { - required string r_s_group_name = 1; -} - -message GetRSGroupInfoResponse { - optional RSGroupInfo r_s_group_info = 1; -} - -message GetRSGroupInfoOfTableRequest { - required TableName table_name = 1; -} - -message GetRSGroupInfoOfTableResponse { - optional RSGroupInfo r_s_group_info = 1; -} - -message MoveServersRequest { - required string target_group = 1; - repeated ServerName servers = 3; -} - -message MoveServersResponse { -} - -message MoveTablesRequest { - required string target_group = 1; - repeated TableName table_name = 2; -} - -message MoveTablesResponse { -} - -message AddRSGroupRequest { - required string r_s_group_name = 1; -} - -message AddRSGroupResponse { -} - -message RemoveRSGroupRequest { - required string r_s_group_name = 1; -} - -message RemoveRSGroupResponse { -} - -message BalanceRSGroupRequest { - required string r_s_group_name = 1; -} - -message BalanceRSGroupResponse { - required bool balanceRan = 1; -} - -message ListRSGroupInfosRequest { -} - -message ListRSGroupInfosResponse { - repeated RSGroupInfo r_s_group_info = 1; -} - -message GetRSGroupInfoOfServerRequest { - required ServerName server = 2; -} - -message GetRSGroupInfoOfServerResponse { - optional RSGroupInfo r_s_group_info = 1; -} - -message MoveServersAndTablesRequest { - required string target_group = 1; - repeated ServerName servers = 2; - repeated TableName table_name = 3; -} - -message MoveServersAndTablesResponse { -} - -message RemoveServersRequest { - repeated 
ServerName servers = 1; -} - -message RemoveServersResponse { -} - -service RSGroupAdminService { - rpc GetRSGroupInfo(GetRSGroupInfoRequest) - returns (GetRSGroupInfoResponse); - - rpc GetRSGroupInfoOfTable(GetRSGroupInfoOfTableRequest) - returns (GetRSGroupInfoOfTableResponse); - - rpc GetRSGroupInfoOfServer(GetRSGroupInfoOfServerRequest) - returns (GetRSGroupInfoOfServerResponse); - - rpc MoveServers(MoveServersRequest) - returns (MoveServersResponse); - - rpc MoveTables(MoveTablesRequest) - returns (MoveTablesResponse); - - rpc AddRSGroup(AddRSGroupRequest) - returns (AddRSGroupResponse); - - rpc RemoveRSGroup(RemoveRSGroupRequest) - returns (RemoveRSGroupResponse); - - rpc BalanceRSGroup(BalanceRSGroupRequest) - returns (BalanceRSGroupResponse); - - rpc ListRSGroupInfos(ListRSGroupInfosRequest) - returns (ListRSGroupInfosResponse); - - rpc MoveServersAndTables(MoveServersAndTablesRequest) - returns (MoveServersAndTablesResponse); - - rpc RemoveServers(RemoveServersRequest) - returns (RemoveServersResponse); -} diff --git a/hbase-protocol/src/main/protobuf/RowProcessor.proto b/hbase-protocol/src/main/protobuf/RowProcessor.proto deleted file mode 100644 index b2ed362df74..00000000000 --- a/hbase-protocol/src/main/protobuf/RowProcessor.proto +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -/** - * Defines a protocol to perform multi row transactions. - * See BaseRowProcessorEndpoint for the implementation. - * See HRegion#processRowsWithLocks() for details. - */ -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "RowProcessorProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -message ProcessRequest { - required string row_processor_class_name = 1; - optional string row_processor_initializer_message_name = 2; - optional bytes row_processor_initializer_message = 3; - optional uint64 nonce_group = 4; - optional uint64 nonce = 5; -} - -message ProcessResponse { - required bytes row_processor_result = 1; -} - -service RowProcessorService { - rpc Process(ProcessRequest) returns (ProcessResponse); -} diff --git a/hbase-protocol/src/main/protobuf/Snapshot.proto b/hbase-protocol/src/main/protobuf/Snapshot.proto deleted file mode 100644 index 59a65a9a662..00000000000 --- a/hbase-protocol/src/main/protobuf/Snapshot.proto +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "SnapshotProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "FS.proto"; -import "HBase.proto"; - -message SnapshotFileInfo { - enum Type { - HFILE = 1; - WAL = 2; - } - - required Type type = 1; - - optional string hfile = 3; - - optional string wal_server = 4; - optional string wal_name = 5; -} - -message SnapshotRegionManifest { - optional int32 version = 1; - - required RegionInfo region_info = 2; - repeated FamilyFiles family_files = 3; - - message StoreFile { - required string name = 1; - optional Reference reference = 2; - - // TODO: Add checksums or other fields to verify the file - optional uint64 file_size = 3; - } - - message FamilyFiles { - required bytes family_name = 1; - repeated StoreFile store_files = 2; - } -} - -message SnapshotDataManifest { - required TableSchema table_schema = 1; - repeated SnapshotRegionManifest region_manifests = 2; -} diff --git a/hbase-protocol/src/main/protobuf/Tracing.proto b/hbase-protocol/src/main/protobuf/Tracing.proto deleted file mode 100644 index 4fa1ab8c12d..00000000000 --- a/hbase-protocol/src/main/protobuf/Tracing.proto +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "TracingProtos"; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -//Used to pass through the information necessary to continue -//a trace after an RPC is made. All we need is the traceid -//(so we know the overarching trace this message is a part of), and -//the id of the current span when this message was sent, so we know -//what span caused the new span we will create when this message is received. 
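(For context, a hedged Java sketch of how a client could stamp the outgoing RPC RequestHeader from RPC.proto with this trace info; the setTraceId/setParentId/setTraceInfo builder names follow protoc's usual snake_case-to-camelCase mapping, and the span values are hypothetical.)

import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.hadoop.hbase.protobuf.generated.TracingProtos.RPCTInfo;

public final class TraceInfoSketch {
  // Carries the current trace/span ids so the server can parent the span it creates.
  static RequestHeader.Builder withTrace(RequestHeader.Builder header, long traceId, long spanId) {
    RPCTInfo trace = RPCTInfo.newBuilder()
        .setTraceId(traceId)    // the overarching trace this call belongs to
        .setParentId(spanId)    // the span that caused the span the server will create
        .build();
    return header.setTraceInfo(trace);
  }
}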
-message RPCTInfo { - optional int64 trace_id = 1; - optional int64 parent_id = 2; -} diff --git a/hbase-protocol/src/main/protobuf/VisibilityLabels.proto b/hbase-protocol/src/main/protobuf/VisibilityLabels.proto deleted file mode 100644 index 44d0dfc4944..00000000000 --- a/hbase-protocol/src/main/protobuf/VisibilityLabels.proto +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "VisibilityLabelsProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "Client.proto"; - -message VisibilityLabelsRequest { - repeated VisibilityLabel visLabel = 1; -} - -message VisibilityLabel { - required bytes label = 1; - optional uint32 ordinal = 2; -} - -message VisibilityLabelsResponse { - repeated RegionActionResult result = 1; -} - -message SetAuthsRequest { - required bytes user = 1; - repeated bytes auth = 2; -} - -message UserAuthorizations { - required bytes user = 1; - repeated uint32 auth = 2; -} - -message MultiUserAuthorizations { - repeated UserAuthorizations userAuths = 1; -} - -message GetAuthsRequest { - required bytes user = 1; -} - -message GetAuthsResponse { - required bytes user = 1; - repeated bytes auth = 2; -} - -message ListLabelsRequest { - optional string regex = 1; -} - -message ListLabelsResponse { - repeated bytes label = 1; -} - -service VisibilityLabelsService { - rpc addLabels(VisibilityLabelsRequest) - returns (VisibilityLabelsResponse); - rpc setAuths(SetAuthsRequest) - returns (VisibilityLabelsResponse); - rpc clearAuths(SetAuthsRequest) - returns (VisibilityLabelsResponse); - rpc getAuths(GetAuthsRequest) - returns (GetAuthsResponse); - rpc listLabels(ListLabelsRequest) - returns (ListLabelsResponse); -} diff --git a/hbase-protocol/src/main/protobuf/WAL.proto b/hbase-protocol/src/main/protobuf/WAL.proto deleted file mode 100644 index f24ea0fc505..00000000000 --- a/hbase-protocol/src/main/protobuf/WAL.proto +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; -package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "WALProtos"; -option java_generic_services = false; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; - -message WALHeader { - optional bool has_compression = 1; - optional bytes encryption_key = 2; - optional bool has_tag_compression = 3; - optional string writer_cls_name = 4; - optional string cell_codec_cls_name = 5; -} - -/* - * Protocol buffer version of WALKey; see WALKey comment, not really a key but WALEdit header - * for some KVs - */ -message WALKey { - required bytes encoded_region_name = 1; - required bytes table_name = 2; - required uint64 log_sequence_number = 3; - required uint64 write_time = 4; - /* - This parameter is deprecated in favor of clusters which - contains the list of clusters that have consumed the change. - It is retained so that the log created by earlier releases (0.94) - can be read by the newer releases. - */ - optional UUID cluster_id = 5 [deprecated=true]; - - repeated FamilyScope scopes = 6; - optional uint32 following_kv_count = 7; - - /* - This field contains the list of clusters that have - consumed the change - */ - repeated UUID cluster_ids = 8; - - optional uint64 nonceGroup = 9; - optional uint64 nonce = 10; - optional uint64 orig_sequence_number = 11; - repeated Attribute extended_attributes = 12; -/* - optional CustomEntryType custom_entry_type = 9; - - enum CustomEntryType { - COMPACTION = 0; - } -*/ -} -message Attribute { - required string key = 1; - required bytes value = 2; -} - -enum ScopeType { - REPLICATION_SCOPE_LOCAL = 0; - REPLICATION_SCOPE_GLOBAL = 1; -} - -message FamilyScope { - required bytes family = 1; - required ScopeType scope_type = 2; -} - -/** - * Custom WAL entries - */ - -/** - * Special WAL entry to hold all related to a compaction. - * Written to WAL before completing compaction. There is - * sufficient info in the below message to complete later - * the compaction should we fail the WAL write. - */ -message CompactionDescriptor { - required bytes table_name = 1; // TODO: WALKey already stores these, might remove - required bytes encoded_region_name = 2; - required bytes family_name = 3; - repeated string compaction_input = 4; // relative to store dir - repeated string compaction_output = 5; - required string store_home_dir = 6; // relative to region dir - optional bytes region_name = 7; // full region name -} - -/** - * Special WAL entry to hold all related to a flush.
- */ -message FlushDescriptor { - enum FlushAction { - START_FLUSH = 0; - COMMIT_FLUSH = 1; - ABORT_FLUSH = 2; - CANNOT_FLUSH = 3; // marker for indicating that a flush has been requested but cannot complete - } - - message StoreFlushDescriptor { - required bytes family_name = 1; - required string store_home_dir = 2; //relative to region dir - repeated string flush_output = 3; // relative to store dir (if this is a COMMIT_FLUSH) - } - - required FlushAction action = 1; - required bytes table_name = 2; - required bytes encoded_region_name = 3; - optional uint64 flush_sequence_number = 4; - repeated StoreFlushDescriptor store_flushes = 5; - optional bytes region_name = 6; // full region name -} - -message StoreDescriptor { - required bytes family_name = 1; - required string store_home_dir = 2; //relative to region dir - repeated string store_file = 3; // relative to store dir - optional uint64 store_file_size_bytes = 4; // size of store file -} - -/** - * Special WAL entry used for writing bulk load events to WAL - */ -message BulkLoadDescriptor { - required TableName table_name = 1; - required bytes encoded_region_name = 2; - repeated StoreDescriptor stores = 3; - required int64 bulkload_seq_num = 4; -} - -/** - * Special WAL entry to hold all related to a region event (open/close). - */ -message RegionEventDescriptor { - enum EventType { - REGION_OPEN = 0; - REGION_CLOSE = 1; - } - - required EventType event_type = 1; - required bytes table_name = 2; - required bytes encoded_region_name = 3; - optional uint64 log_sequence_number = 4; - repeated StoreDescriptor stores = 5; - optional ServerName server = 6; // Server who opened the region - optional bytes region_name = 7; // full region name -} - -/** - * A trailer that is appended to the end of a properly closed WAL file. - * If missing, this is either a legacy or a corrupted WAL file. - * N.B. This trailer currently doesn't contain any information and we - * purposefully don't expose it in the WAL APIs. It's for future growth. - */ -message WALTrailer { -} diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto deleted file mode 100644 index e06f4a03a00..00000000000 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -// ZNode data in hbase are serialized protobufs with a four byte -// 'magic' 'PBUF' prefix. 
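(For context, a hedged Java sketch of that 'PBUF' framing using the ClusterUp payload defined below; it assumes the prependPBMagic/lengthOfPBMagic helpers of the unshaded org.apache.hadoop.hbase.protobuf.ProtobufUtil and the PARSER field that proto2 codegen exposes on generated messages.)

import java.io.IOException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp;

public final class PbufFramingSketch {
  // Serializes the '/hbase/running' marker with the four-byte 'PBUF' prefix.
  static byte[] encode(String startDate) {
    ClusterUp up = ClusterUp.newBuilder().setStartDate(startDate).build();
    return ProtobufUtil.prependPBMagic(up.toByteArray());
  }

  // Parses the znode bytes back, skipping the magic prefix first.
  static ClusterUp decode(byte[] znodeData) throws IOException {
    int off = ProtobufUtil.lengthOfPBMagic();
    return ClusterUp.PARSER.parseFrom(znodeData, off, znodeData.length - off);
  }
}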
-package hbase.pb; - -option java_package = "org.apache.hadoop.hbase.protobuf.generated"; -option java_outer_classname = "ZooKeeperProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -option optimize_for = SPEED; - -import "HBase.proto"; -import "ClusterStatus.proto"; - -/** - * Content of the meta-region-server znode. - */ -message MetaRegionServer { - // The ServerName hosting the meta region currently, or destination server, - // if meta region is in transition. - required ServerName server = 1; - // The major version of the rpc the server speaks. This is used so that - // clients connecting to the cluster can have prior knowledge of what version - // to send to a RegionServer. AsyncHBase will use this to detect versions. - optional uint32 rpc_version = 2; - - // State of the region transition. OPEN means fully operational 'hbase:meta' - optional RegionState.State state = 3; -} - -/** - * Content of the master znode. - */ -message Master { - // The ServerName of the current Master - required ServerName master = 1; - // Major RPC version so that clients can know what version the master can accept. - optional uint32 rpc_version = 2; - optional uint32 info_port = 3; -} - -/** - * Content of the '/hbase/running', cluster state, znode. - */ -message ClusterUp { - // If this znode is present, cluster is up. Currently - // the data is cluster start_date. - required string start_date = 1; -} - -/** - * WAL SplitLog directory znodes have this for content. Used when doing distributed - * WAL splitting. Holds current state and name of server that originated split. - */ -message SplitLogTask { - enum State { - UNASSIGNED = 0; - OWNED = 1; - RESIGNED = 2; - DONE = 3; - ERR = 4; - } - required State state = 1; - required ServerName server_name = 2; - // optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN]; -} - -/** - * The znode that holds the state of a table. - * Deprecated, table state is stored in the table descriptor on HDFS. - */ -message DeprecatedTableState { - // Table's current state - enum State { - ENABLED = 0; - DISABLED = 1; - DISABLING = 2; - ENABLING = 3; - } - // This is the table's state. If no znode for a table, - // its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class - // for more. - required State state = 1 [default = ENABLED]; -} - -message TableCF { - optional TableName table_name = 1; - repeated bytes families = 2; -} - -/** - * Used by replication. Holds a replication peer key. - */ -message ReplicationPeer { - // clusterkey is the concatenation of the slave cluster's - // hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent - required string clusterkey = 1; - optional string replicationEndpointImpl = 2; - repeated BytesBytesPair data = 3; - repeated NameStringPair configuration = 4; - repeated TableCF table_cfs = 5; - repeated bytes namespaces = 6; - optional int64 bandwidth = 7; -} - -/** - * Used by replication. Holds whether enabled or disabled. - */ -message ReplicationState { - enum State { - ENABLED = 0; - DISABLED = 1; - } - required State state = 1; -} - -/** - * Used by replication. Holds the current position in a WAL file. - */ -message ReplicationHLogPosition { - required int64 position = 1; -} - -/** - * State of the switch.
- */ -message SwitchState { - optional bool enabled = 1; -} diff --git a/hbase-protocol/src/main/protobuf/test.proto b/hbase-protocol/src/main/protobuf/test.proto deleted file mode 100644 index 89d47865c39..00000000000 --- a/hbase-protocol/src/main/protobuf/test.proto +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -syntax = "proto2"; - -option java_package = "org.apache.hadoop.hbase.ipc.protobuf.generated"; -option java_outer_classname = "TestProtos"; -option java_generate_equals_and_hash = true; - -message EmptyRequestProto { -} - -message EmptyResponseProto { -} - -message EchoRequestProto { - required string message = 1; -} - -message EchoResponseProto { - required string message = 1; -} - -message PauseRequestProto { - required uint32 ms = 1; -} - -message AddrResponseProto { - required string addr = 1; -} diff --git a/hbase-protocol/src/main/protobuf/test_rpc_service.proto b/hbase-protocol/src/main/protobuf/test_rpc_service.proto deleted file mode 100644 index 7c67ef2dd43..00000000000 --- a/hbase-protocol/src/main/protobuf/test_rpc_service.proto +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -syntax = "proto2"; - -option java_package = "org.apache.hadoop.hbase.ipc.protobuf.generated"; -option java_outer_classname = "TestRpcServiceProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; - -import "test.proto"; - - -/** - * A protobuf service for use in tests - */ -service TestProtobufRpcProto { - rpc ping(EmptyRequestProto) returns (EmptyResponseProto); - rpc echo(EchoRequestProto) returns (EchoResponseProto); - rpc error(EmptyRequestProto) returns (EmptyResponseProto); - rpc pause(PauseRequestProto) returns (EmptyResponseProto); - rpc addr(EmptyRequestProto) returns (AddrResponseProto); -} diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java index 2621de0eacb..128be02bb34 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -19,28 +19,26 @@ package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlValue; - -import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; - -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; /** * Representation of a cell. 
A cell is a single value associated with a column and * optional qualifier, and either the timestamp when it was stored or the user- diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 05813f5b067..5cf303ec0c3 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -266,13 +266,6 @@ test-jar test - - - org.apache.hbase - hbase-protocol - org.apache.hbase hbase-protocol-shaded diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index b5ca6b5af67..eba262f64a0 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -50,7 +50,6 @@ org.apache.hadoop.hbase.master.DeadServer; org.apache.hadoop.hbase.master.HMaster; org.apache.hadoop.hbase.master.RegionState; org.apache.hadoop.hbase.master.ServerManager; -org.apache.hadoop.hbase.protobuf.ProtobufUtil; org.apache.hadoop.hbase.quotas.QuotaUtil; org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; org.apache.hadoop.hbase.rsgroup.RSGroupUtil; diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon index bf94c5b29b5..7947f294cd1 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon @@ -31,9 +31,6 @@ org.apache.hadoop.hbase.util.Bytes; org.apache.hadoop.hbase.HRegionInfo; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.HBaseConfiguration; -org.apache.hadoop.hbase.protobuf.ProtobufUtil; -org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; -org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad; org.apache.hadoop.hbase.util.DirectMemoryUtils; org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; java.lang.management.MemoryUsage; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index 086f85b26ec..842b25db513 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -358,12 +358,11 @@ public final class SnapshotDescriptionUtils { } /** - * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory + * Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory * @param fs filesystem where the snapshot was taken * @param snapshotDir directory where the snapshot was stored * @return the stored snapshot description - * @throws CorruptedSnapshotException if the - * snapshot cannot be read + * @throws CorruptedSnapshotException if the snapshot cannot be read */ public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir) throws CorruptedSnapshotException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java deleted file mode 100644 index 69e656fbe52..00000000000 ---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java +++ /dev/null @@ -1,383 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.protobuf; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import com.google.protobuf.ByteString; -import java.io.IOException; -import java.nio.ByteBuffer; -import org.apache.hadoop.hbase.ByteBufferKeyValue; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderType; -import org.apache.hadoop.hbase.CellComparatorImpl; -import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.protobuf.generated.CellProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; -import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer; - -/** - * Class to test ProtobufUtil. 
- */ -@Category({ MiscTests.class, SmallTests.class}) -public class TestProtobufUtil { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProtobufUtil.class); - - @Test - public void testException() throws IOException { - NameBytesPair.Builder builder = NameBytesPair.newBuilder(); - final String omg = "OMG!!!"; - builder.setName("java.io.IOException"); - builder.setValue(ByteString.copyFrom(Bytes.toBytes(omg))); - Throwable t = ProtobufUtil.toException(builder.build()); - assertEquals(omg, t.getMessage()); - builder.clear(); - builder.setName("org.apache.hadoop.ipc.RemoteException"); - builder.setValue(ByteString.copyFrom(Bytes.toBytes(omg))); - t = ProtobufUtil.toException(builder.build()); - assertEquals(omg, t.getMessage()); - } - - /** - * Test basic Get conversions. - * - * @throws IOException - */ - @Test - public void testGet() throws IOException { - ClientProtos.Get.Builder getBuilder = ClientProtos.Get.newBuilder(); - getBuilder.setRow(ByteString.copyFromUtf8("row")); - Column.Builder columnBuilder = Column.newBuilder(); - columnBuilder.setFamily(ByteString.copyFromUtf8("f1")); - columnBuilder.addQualifier(ByteString.copyFromUtf8("c1")); - columnBuilder.addQualifier(ByteString.copyFromUtf8("c2")); - getBuilder.addColumn(columnBuilder.build()); - - columnBuilder.clear(); - columnBuilder.setFamily(ByteString.copyFromUtf8("f2")); - getBuilder.addColumn(columnBuilder.build()); - getBuilder.setLoadColumnFamiliesOnDemand(true); - ClientProtos.Get proto = getBuilder.build(); - // default fields - assertEquals(1, proto.getMaxVersions()); - assertEquals(true, proto.getCacheBlocks()); - - // set the default value for equal comparison - getBuilder = ClientProtos.Get.newBuilder(proto); - getBuilder.setMaxVersions(1); - getBuilder.setCacheBlocks(true); - getBuilder.setTimeRange(ProtobufUtil.toTimeRange(TimeRange.allTime())); - - Get get = ProtobufUtil.toGet(proto); - assertEquals(getBuilder.build(), ProtobufUtil.toGet(get)); - } - - /** - * Test Append Mutate conversions. 
- * - * @throws IOException - */ - @Test - public void testAppend() throws IOException { - long timeStamp = 111111; - MutationProto.Builder mutateBuilder = MutationProto.newBuilder(); - mutateBuilder.setRow(ByteString.copyFromUtf8("row")); - mutateBuilder.setMutateType(MutationType.APPEND); - mutateBuilder.setTimestamp(timeStamp); - ColumnValue.Builder valueBuilder = ColumnValue.newBuilder(); - valueBuilder.setFamily(ByteString.copyFromUtf8("f1")); - QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder(); - qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1")); - qualifierBuilder.setValue(ByteString.copyFromUtf8("v1")); - qualifierBuilder.setTimestamp(timeStamp); - valueBuilder.addQualifierValue(qualifierBuilder.build()); - qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2")); - qualifierBuilder.setValue(ByteString.copyFromUtf8("v2")); - valueBuilder.addQualifierValue(qualifierBuilder.build()); - mutateBuilder.addColumnValue(valueBuilder.build()); - - MutationProto proto = mutateBuilder.build(); - // default fields - assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability()); - - // set the default value for equal comparison - mutateBuilder = MutationProto.newBuilder(proto); - mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT); - - Append append = ProtobufUtil.toAppend(proto, null); - - // append always use the latest timestamp, - // reset the timestamp to the original mutate - mutateBuilder.setTimestamp(append.getTimestamp()); - mutateBuilder.setTimeRange(ProtobufUtil.toTimeRange(append.getTimeRange())); - assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.APPEND, append)); - } - - /** - * Test Delete Mutate conversions. - * - * @throws IOException - */ - @Test - public void testDelete() throws IOException { - MutationProto.Builder mutateBuilder = MutationProto.newBuilder(); - mutateBuilder.setRow(ByteString.copyFromUtf8("row")); - mutateBuilder.setMutateType(MutationType.DELETE); - mutateBuilder.setTimestamp(111111); - ColumnValue.Builder valueBuilder = ColumnValue.newBuilder(); - valueBuilder.setFamily(ByteString.copyFromUtf8("f1")); - QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder(); - qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1")); - qualifierBuilder.setDeleteType(DeleteType.DELETE_ONE_VERSION); - qualifierBuilder.setTimestamp(111222); - valueBuilder.addQualifierValue(qualifierBuilder.build()); - qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2")); - qualifierBuilder.setDeleteType(DeleteType.DELETE_MULTIPLE_VERSIONS); - qualifierBuilder.setTimestamp(111333); - valueBuilder.addQualifierValue(qualifierBuilder.build()); - mutateBuilder.addColumnValue(valueBuilder.build()); - - MutationProto proto = mutateBuilder.build(); - // default fields - assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability()); - - // set the default value for equal comparison - mutateBuilder = MutationProto.newBuilder(proto); - mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT); - - Delete delete = ProtobufUtil.toDelete(proto); - - // delete always have empty value, - // add empty value to the original mutate - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { - qualifier.setValue(ByteString.EMPTY); - } - } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.DELETE, delete)); - } - - /** - * Test Increment Mutate 
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testIncrement() throws IOException {
-    long timeStamp = 111111;
-    MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
-    mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
-    mutateBuilder.setMutateType(MutationType.INCREMENT);
-    ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
-    valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
-    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
-    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
-    qualifierBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(11L)));
-    qualifierBuilder.setTimestamp(timeStamp);
-    valueBuilder.addQualifierValue(qualifierBuilder.build());
-    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
-    qualifierBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(22L)));
-    valueBuilder.addQualifierValue(qualifierBuilder.build());
-    mutateBuilder.addColumnValue(valueBuilder.build());
-
-    MutationProto proto = mutateBuilder.build();
-    // default fields
-    assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());
-
-    // set the default value for equal comparison
-    mutateBuilder = MutationProto.newBuilder(proto);
-    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
-
-    Increment increment = ProtobufUtil.toIncrement(proto, null);
-    mutateBuilder.setTimestamp(increment.getTimestamp());
-    mutateBuilder.setTimeRange(ProtobufUtil.toTimeRange(increment.getTimeRange()));
-    assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.INCREMENT, increment));
-  }
-
-  /**
-   * Test Put Mutate conversions.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testPut() throws IOException {
-    MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
-    mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
-    mutateBuilder.setMutateType(MutationType.PUT);
-    mutateBuilder.setTimestamp(111111);
-    ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
-    valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
-    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
-    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
-    qualifierBuilder.setValue(ByteString.copyFromUtf8("v1"));
-    valueBuilder.addQualifierValue(qualifierBuilder.build());
-    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
-    qualifierBuilder.setValue(ByteString.copyFromUtf8("v2"));
-    qualifierBuilder.setTimestamp(222222);
-    valueBuilder.addQualifierValue(qualifierBuilder.build());
-    mutateBuilder.addColumnValue(valueBuilder.build());
-
-    MutationProto proto = mutateBuilder.build();
-    // default fields
-    assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());
-
-    // set the default value for equal comparison
-    mutateBuilder = MutationProto.newBuilder(proto);
-    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
-
-    Put put = ProtobufUtil.toPut(proto);
-
-    // put value always use the default timestamp if no
-    // value level timestamp specified,
-    // add the timestamp to the original mutate
-    long timestamp = put.getTimestamp();
-    for (ColumnValue.Builder column:
-        mutateBuilder.getColumnValueBuilderList()) {
-      for (QualifierValue.Builder qualifier:
-          column.getQualifierValueBuilderList()) {
-        if (!qualifier.hasTimestamp()) {
-          qualifier.setTimestamp(timestamp);
-        }
-      }
-    }
-    assertEquals(mutateBuilder.build(),
-        ProtobufUtil.toMutation(MutationType.PUT, put));
-  }
-
-  /**
-   * Test basic Scan conversions.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testScan() throws IOException {
-    ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
-    scanBuilder.setStartRow(ByteString.copyFromUtf8("row1"));
-    scanBuilder.setStopRow(ByteString.copyFromUtf8("row2"));
-    Column.Builder columnBuilder = Column.newBuilder();
-    columnBuilder.setFamily(ByteString.copyFromUtf8("f1"));
-    columnBuilder.addQualifier(ByteString.copyFromUtf8("c1"));
-    columnBuilder.addQualifier(ByteString.copyFromUtf8("c2"));
-    scanBuilder.addColumn(columnBuilder.build());
-
-    columnBuilder.clear();
-    columnBuilder.setFamily(ByteString.copyFromUtf8("f2"));
-    scanBuilder.addColumn(columnBuilder.build());
-
-    ClientProtos.Scan proto = scanBuilder.build();
-
-    // Verify default values
-    assertEquals(1, proto.getMaxVersions());
-    assertEquals(true, proto.getCacheBlocks());
-
-    // Verify fields survive ClientProtos.Scan -> Scan -> ClientProtos.Scan
-    // conversion
-    scanBuilder = ClientProtos.Scan.newBuilder(proto);
-    scanBuilder.setMaxVersions(2);
-    scanBuilder.setCacheBlocks(false);
-    scanBuilder.setCaching(1024);
-    scanBuilder.setTimeRange(ProtobufUtil.toTimeRange(TimeRange.allTime()));
-    scanBuilder.setIncludeStopRow(false);
-    ClientProtos.Scan expectedProto = scanBuilder.build();
-
-    ClientProtos.Scan actualProto = ProtobufUtil.toScan(
-        ProtobufUtil.toScan(expectedProto));
-    assertEquals(expectedProto, actualProto);
-  }
-
-  @Test
-  public void testToCell() throws Exception {
-    KeyValue kv1 =
-        new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]);
-    KeyValue kv2 =
-        new KeyValue(Bytes.toBytes("bbb"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]);
-    KeyValue kv3 =
-        new KeyValue(Bytes.toBytes("ccc"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]);
-    byte[] arr = new byte[kv1.getLength() + kv2.getLength() + kv3.getLength()];
-    System.arraycopy(kv1.getBuffer(), kv1.getOffset(), arr, 0, kv1.getLength());
-    System.arraycopy(kv2.getBuffer(), kv2.getOffset(), arr, kv1.getLength(), kv2.getLength());
-    System.arraycopy(kv3.getBuffer(), kv3.getOffset(), arr, kv1.getLength() + kv2.getLength(),
-        kv3.getLength());
-    ByteBuffer dbb = ByteBuffer.allocateDirect(arr.length);
-    dbb.put(arr);
-    ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength());
-    CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV);
-    Cell newOffheapKV =
-        ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell);
-    assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
-  }
-
-  @Test
-  public void testMetaRegionState() throws Exception {
-    ServerName serverName = ServerName.valueOf("localhost", 1234, 5678);
-    // New region state style.
-    for (RegionState.State state: RegionState.State.values()) {
-      RegionState regionState =
-          new RegionState(RegionInfoBuilder.FIRST_META_REGIONINFO, state, serverName);
-      MetaRegionServer metars = MetaRegionServer.newBuilder()
-          .setServer(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(serverName))
-          .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
-          .setState(state.convert()).build();
-      // Serialize
-      byte[] data = ProtobufUtil.prependPBMagic(metars.toByteArray());
-      ProtobufUtil.prependPBMagic(data);
-      // Deserialize
-      RegionState regionStateNew =
-          org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.parseMetaRegionStateFrom(data, 1);
-      assertEquals(regionState.getServerName(), regionStateNew.getServerName());
-      assertEquals(regionState.getState(), regionStateNew.getState());
-    }
-    // old style.
-    RegionState rs =
-        org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.parseMetaRegionStateFrom(
-            serverName.getVersionedBytes(), 1);
-    assertEquals(serverName, rs.getServerName());
-    assertEquals(rs.getState(), RegionState.State.OPEN);
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java
index 236c03545be..7b0e6cbdd8f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java
index 7cdf2f36ec0..ca1fe8bc2b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
@@ -48,6 +47,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
+
 @Category({SecurityTests.class, MediumTests.class})
 public class TestVisibilityLabelsWithSLGStack {
diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml
index 69d6d71bd5f..05121871d92 100644
--- a/hbase-testing-util/pom.xml
+++ b/hbase-testing-util/pom.xml
@@ -55,12 +55,6 @@
     </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-protocol</artifactId>
-      <type>jar</type>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index 943a7a29494..948a609a32f 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -165,10 +165,6 @@
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-protocol</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
index 9f92cdfde93..18fd3012a07 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.visibility.ScanLabelGenerator;
@@ -73,6 +72,8 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
+
 @Category({ClientTests.class, MediumTests.class})
 public class TestThriftHBaseServiceHandlerWithLabels {
diff --git a/pom.xml b/pom.xml
index dfde14405dc..68be13ee773 100755
--- a/pom.xml
+++ b/pom.xml
@@ -70,7 +70,6 @@
     <module>hbase-thrift</module>
     <module>hbase-shell</module>
    <module>hbase-protocol-shaded</module>
-    <module>hbase-protocol</module>
    <module>hbase-client</module>
    <module>hbase-hadoop-compat</module>
    <module>hbase-common</module>
@@ -1682,11 +1681,6 @@
        <artifactId>hbase-protocol-shaded</artifactId>
        <version>${project.version}</version>
      </dependency>
-      <dependency>
-        <groupId>org.apache.hbase</groupId>
-        <artifactId>hbase-protocol</artifactId>
-        <version>${project.version}</version>
-      </dependency>
      <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase-procedure</artifactId>
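
For downstream code affected by this patch, the migration is mechanical: replace the unshaded `com.google.protobuf` / `org.apache.hadoop.hbase.protobuf.generated` types with their shaded counterparts and drop the `hbase-protocol` dependency. Below is a minimal sketch of the Get round-trip the removed testGet covered, rewritten against the shaded classes. `ShadedGetRoundTrip` is a hypothetical illustration, not part of this patch; it assumes only the `toGet` overloads on the shaded `ProtobufUtil` that the rest of this change already relies on.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

/** Hypothetical example, not part of this patch. */
public class ShadedGetRoundTrip {
  public static void main(String[] args) throws IOException {
    // Build the request proto with the shaded ByteString, where the removed
    // test used com.google.protobuf.ByteString.
    ClientProtos.Get proto = ClientProtos.Get.newBuilder()
        .setRow(ByteString.copyFromUtf8("row"))
        .build();

    // Same proto defaults the removed test asserted.
    assert proto.getMaxVersions() == 1;
    assert proto.getCacheBlocks();

    // proto -> client Get -> proto through the shaded ProtobufUtil.
    Get get = ProtobufUtil.toGet(proto);
    ClientProtos.Get roundTripped = ProtobufUtil.toGet(get);
    assert proto.getRow().equals(roundTripped.getRow());
  }
}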