HBASE-23798 Remove hbase-protocol module (#1360)
Signed-off-by: Jan Hentschel <jan.hentschel@ultratendency.com>
Signed-off-by: stack <stack@apache.org>
parent aaae46c976
commit 5d4e020c3d

@@ -268,10 +268,6 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-metrics</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol-shaded</artifactId>

@@ -95,10 +95,6 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol-shaded</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
</dependency>
<!-- General dependencies -->
<dependency>
<groupId>com.github.stephenc.findbugs</groupId>

@@ -1700,19 +1700,26 @@ public interface Admin extends Abortable, Closeable {
List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException;

/**
* Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the active
* master. <p> The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access
* a published coprocessor {@link com.google.protobuf.Service} using standard protobuf service
* invocations: </p> <div style="background-color: #cccccc; padding: 2px">
* <blockquote><pre>
* Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
* instance connected to the active master.
* <p/>
* The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
* used to access a published coprocessor
* {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service
* invocations:
* <p/>
* <div style="background-color: #cccccc; padding: 2px">
* <blockquote>
* <pre>
* CoprocessorRpcChannel channel = myAdmin.coprocessorService();
* MyService.BlockingInterface service = MyService.newBlockingStub(channel);
* MyCallRequest request = MyCallRequest.newBuilder()
* ...
* .build();
* MyCallResponse response = service.myCall(null, request);
* </pre></blockquote></div>
*
* </pre>
* </blockquote>
* </div>
* @return A MasterCoprocessorRpcChannel instance
* @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any
* more. Use the coprocessorService methods in {@link AsyncAdmin} instead.

@@ -1722,24 +1729,25 @@ public interface Admin extends Abortable, Closeable {


/**
* Creates and returns a {@link com.google.protobuf.RpcChannel} instance
* connected to the passed region server.
*
* <p>
* The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
* coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
* </p>
*
* <div style="background-color: #cccccc; padding: 2px">
* <blockquote><pre>
* Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
* instance connected to the passed region server.
* <p/>
* The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
* used to access a published coprocessor
* {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service
* invocations:
* <p/>
* <div style="background-color: #cccccc; padding: 2px"> <blockquote>
* <pre>
* CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
* MyService.BlockingInterface service = MyService.newBlockingStub(channel);
* MyCallRequest request = MyCallRequest.newBuilder()
* ...
* .build();
* MyCallResponse response = service.myCall(null, request);
* </pre></blockquote></div>
*
* </pre>
* </blockquote>
* </div>
* @param serverName the server name to which the endpoint call is made
* @return A RegionServerCoprocessorRpcChannel instance
* @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any

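For reference, the deprecated channel pattern documented in the hunks above expands to roughly the following. This is a hedged sketch, not code from this commit: MyService, MyCallRequest and MyCallResponse stand for a hypothetical user-defined coprocessor endpoint, while Connection, ConnectionFactory and Admin are the standard HBase client entry points.

// Sketch only -- MyService/MyCallRequest/MyCallResponse are hypothetical,
// generated from a user-supplied endpoint .proto; the enclosing method is
// assumed to declare throws Exception.
try (Connection conn = ConnectionFactory.createConnection(conf);
     Admin admin = conn.getAdmin()) {
  CoprocessorRpcChannel channel = admin.coprocessorService();
  MyService.BlockingInterface service = MyService.newBlockingStub(channel);
  MyCallRequest request = MyCallRequest.newBuilder().build();
  MyCallResponse response = service.myCall(null, request);  // null controller
}
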
@@ -509,18 +509,18 @@ public interface Table extends Closeable {
}

/**
* Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the table
* region containing the specified row. The row given does not actually have to exist. Whichever
* region would contain the row based on start and end keys will be used. Note that the
* {@code row} parameter is also not passed to the coprocessor handler registered for this
* protocol, unless the {@code row} is separately passed as an argument in the service request.
* The parameter here is only used to locate the region used to handle the call.
* <p>
* The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
* coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
* </p>
* Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
* instance connected to the table region containing the specified row. The row given does not
* actually have to exist. Whichever region would contain the row based on start and end keys will
* be used. Note that the {@code row} parameter is also not passed to the coprocessor handler
* registered for this protocol, unless the {@code row} is separately passed as an argument in the
* service request. The parameter here is only used to locate the region used to handle the call.
* <p/>
* The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
* used to access a published coprocessor {@link Service} using standard protobuf service
* invocations:
* <p/>
* <div style="background-color: #cccccc; padding: 2px"> <blockquote>
*
* <pre>
* CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
* MyService.BlockingInterface service = MyService.newBlockingStub(channel);

@@ -529,8 +529,8 @@ public interface Table extends Closeable {
* .build();
* MyCallResponse response = service.myCall(null, request);
* </pre>
*
* </blockquote></div>
* </blockquote>
* </div>
* @param row The row key used to identify the remote region location
* @return A CoprocessorRpcChannel instance
* @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any

@@ -543,10 +543,10 @@ public interface Table extends Closeable {
}

/**
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
* invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
* with each {@link com.google.protobuf.Service} instance.
* Creates an instance of the given {@link Service} subclass for each table region spanning the
* range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
* {@link Service} instance.
* @param service the protocol buffer {@code Service} implementation to call
* @param startKey start region selection with region containing this row. If {@code null}, the
* selection will start with the first table region.

@@ -554,9 +554,9 @@ public interface Table extends Closeable {
* {@code null}, selection will continue through the last table region.
* @param callable this instance's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be
* invoked once per table region, using the {@link com.google.protobuf.Service} instance
* connected to that region.
* @param <T> the {@link com.google.protobuf.Service} subclass to connect to
* invoked once per table region, using the {@link Service} instance connected to that
* region.
* @param <T> the {@link Service} subclass to connect to
* @param <R> Return type for the {@code callable} parameter's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
* @return a map of result values keyed by region name

@@ -585,16 +585,15 @@ public interface Table extends Closeable {
}

/**
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
* invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
* with each {@link Service} instance.
* <p>
* Creates an instance of the given {@link Service} subclass for each table region spanning the
* range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
* {@link Service} instance.
* <p/>
* The given
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
* method will be called with the return value from each region's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
* </p>
* @param service the protocol buffer {@code Service} implementation to call
* @param startKey start region selection with region containing this row. If {@code null}, the
* selection will start with the first table region.

@@ -622,10 +621,10 @@ public interface Table extends Closeable {
}

/**
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
* the invocations to the same region server will be batched into one call. The coprocessor
* service is invoked according to the service instance, method name and parameters.
* Creates an instance of the given {@link Service} subclass for each table region spanning the
* range from the {@code startKey} row to {@code endKey} row (inclusive), all the invocations to
* the same region server will be batched into one call. The coprocessor service is invoked
* according to the service instance, method name and parameters.
* @param methodDescriptor the descriptor for the protobuf service method to call.
* @param request the method call parameters
* @param startKey start region selection with region containing this row. If {@code null}, the

@@ -661,15 +660,14 @@ public interface Table extends Closeable {
}

/**
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
* the invocations to the same region server will be batched into one call. The coprocessor
* service is invoked according to the service instance, method name and parameters.
* <p>
* Creates an instance of the given {@link Service} subclass for each table region spanning the
* range from the {@code startKey} row to {@code endKey} row (inclusive), all the invocations to
* the same region server will be batched into one call. The coprocessor service is invoked
* according to the service instance, method name and parameters.
* <p/>
* The given
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
* method will be called with the return value from each region's invocation.
* </p>
* @param methodDescriptor the descriptor for the protobuf service method to call.
* @param request the method call parameters
* @param startKey start region selection with region containing this row. If {@code null}, the

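A hedged sketch of the batched form documented in the Table hunks above; MyService and countRowsViaEndpoint(...) are hypothetical stand-ins for a user-defined endpoint, while the coprocessorService(Class, byte[], byte[], Batch.Call) signature and the map-keyed-by-region-name return value are as described in the javadoc:

// Sketch only: MyService and countRowsViaEndpoint(...) are hypothetical.
Map<byte[], Long> perRegionCounts = table.coprocessorService(
    MyService.class,
    null, null,  // null startKey/endKey select every region of the table
    new Batch.Call<MyService, Long>() {
      @Override
      public Long call(MyService service) throws IOException {
        // Invoke the endpoint through the per-region proxy and unpack the
        // protobuf response; the plumbing is elided in this sketch.
        return countRowsViaEndpoint(service);
      }
    });
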
@@ -29,22 +29,19 @@ import org.apache.yetus.audience.InterfaceAudience;
*/
@InterfaceAudience.Public
public abstract class Batch {

/**
* Defines a unit of work to be executed.
*
* <p>
* <p/>
* When used with
* {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
* org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
* the implementations {@link Batch.Call#call(Object)} method will be invoked
* with a proxy to each region's coprocessor {@link com.google.protobuf.Service} implementation.
* </p>
* {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], Batch.Call)}
* the implementations {@link Batch.Call#call(Object)} method will be invoked with a proxy to each
* region's coprocessor {@link org.apache.hbase.thirdparty.com.google.protobuf.Service}
* implementation.
* @see org.apache.hadoop.hbase.client.coprocessor.Batch
* @see org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])
* @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
* org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
* @param <T> the instance type to be passed to
* {@link Batch.Call#call(Object)}
* @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], Batch.Call)
* @param <T> the instance type to be passed to {@link Batch.Call#call(Object)}
* @param <R> the return type from {@link Batch.Call#call(Object)}
*/
@InterfaceAudience.Public

@@ -53,17 +50,13 @@ public abstract class Batch {
}

/**
* Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)}
* result.
*
* <p>
* Defines a generic callback to be triggered for each {@link Batch.Call#call(Object)} result.
* <p/>
* When used with
* {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
* org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
* the implementation's {@link Batch.Callback#update(byte[], byte[], Object)}
* method will be called with the {@link Batch.Call#call(Object)} return value
* from each region in the selected range.
* </p>
* {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], Batch.Call)}
* the implementation's {@link Batch.Callback#update(byte[], byte[], Object)} method will be
* called with the {@link Batch.Call#call(Object)} return value from each region in the selected
* range.
* @param <R> the return type from the associated {@link Batch.Call#call(Object)}
* @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
* org.apache.hadoop.hbase.client.coprocessor.Batch.Call)

@@ -24,7 +24,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel;

/**
* Base interface which provides clients with an RPC connection to call coprocessor endpoint
* {@link com.google.protobuf.Service}s.
* {@link org.apache.hbase.thirdparty.com.google.protobuf.Service}s.
* <p/>
* Note that clients should not use this class directly, except through
* {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}.

@@ -26,14 +26,13 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;

/**
* Used for server-side protobuf RPC service invocations. This handler allows
* invocation exceptions to easily be passed through to the RPC server from coprocessor
* {@link com.google.protobuf.Service} implementations.
*
* <p>
* When implementing {@link com.google.protobuf.Service} defined methods,
* coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client:
* <code>
* Used for server-side protobuf RPC service invocations. This handler allows invocation exceptions
* to easily be passed through to the RPC server from coprocessor
* {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} implementations.
* <p/>
* When implementing {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} defined
* methods, coprocessor endpoints can use the following pattern to pass exceptions back to the RPC
* client: <code>
* public void myMethod(RpcController controller, MyRequest request,
* RpcCallback<MyResponse> done) {
* MyResponse response = null;

|
@ -47,7 +46,6 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
|
|||
* done.run(response);
|
||||
* }
|
||||
* </code>
|
||||
* </p>
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class ServerRpcController implements RpcController {
|
||||
|
@@ -98,7 +96,8 @@ public class ServerRpcController implements RpcController {
}

/**
* Sets an exception to be communicated back to the {@link com.google.protobuf.Service} client.
* Sets an exception to be communicated back to the
* {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} client.
* @param ioe the exception encountered during execution of the service method
*/
public void setFailedOn(IOException ioe) {

@@ -108,9 +107,9 @@ public class ServerRpcController implements RpcController {

/**
* Returns any exception thrown during service method invocation, or {@code null} if no exception
* was thrown. This can be used by clients to receive exceptions generated by RPC calls, even
* when {@link RpcCallback}s are used and no {@link com.google.protobuf.ServiceException} is
* declared.
* was thrown. This can be used by clients to receive exceptions generated by RPC calls, even when
* {@link RpcCallback}s are used and no
* {@link org.apache.hbase.thirdparty.com.google.protobuf.ServiceException} is declared.
*/
public IOException getFailedOn() {
return serviceException;

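Putting the pattern from the ServerRpcController class comment together, a server-side endpoint method would look roughly like this. A hedged sketch: MyRequest/MyResponse and buildResponse(...) are hypothetical, while setFailedOn and getFailedOn are the methods shown in the hunks above.

// Sketch only: MyRequest/MyResponse and buildResponse(...) are hypothetical.
public void myMethod(RpcController controller, MyRequest request,
    RpcCallback<MyResponse> done) {
  MyResponse response = null;
  try {
    response = buildResponse(request);
  } catch (IOException ioe) {
    // Hand the exception to the RPC layer instead of throwing it; clients
    // can later retrieve it via ServerRpcController.getFailedOn().
    ((ServerRpcController) controller).setFailedOn(ioe);
  }
  done.run(response);
}
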
File diff suppressed because it is too large.

@@ -2992,7 +2992,7 @@ public final class ProtobufUtil {

/**
* Creates {@link CompactionState} from
* {@link org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState}
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState}
* state
* @param state the protobuf CompactionState
* @return CompactionState

@@ -3011,7 +3011,8 @@ public final class ProtobufUtil {
}

/**
* Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
* Creates
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type}
* from {@link SnapshotType}
* @param type the SnapshotDescription type
* @return the protobuf SnapshotDescription type

@@ -3022,7 +3023,8 @@ public final class ProtobufUtil {
}

/**
* Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
* Creates
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type}
* from the type of SnapshotDescription string
* @param snapshotDesc string representing the snapshot description type
* @return the protobuf SnapshotDescription type

@@ -3033,8 +3035,8 @@ public final class ProtobufUtil {
}

/**
* Creates {@link SnapshotType} from the type of
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
* Creates {@link SnapshotType} from the
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type}
* @param type the snapshot description type
* @return the protobuf SnapshotDescription type
*/

@@ -3044,7 +3046,7 @@ public final class ProtobufUtil {

/**
* Convert from {@link SnapshotDescription} to
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}
* @param snapshotDesc the POJO SnapshotDescription
* @return the protobuf SnapshotDescription
*/

@@ -3076,7 +3078,7 @@ public final class ProtobufUtil {

/**
* Convert from
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} to
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription} to
* {@link SnapshotDescription}
* @param snapshotDesc the protobuf SnapshotDescription
* @return the POJO SnapshotDescription

@@ -54,12 +54,12 @@ public final class ClientSnapshotDescriptionUtils {
}

/**
* Returns a single line (no \n) representation of snapshot metadata. Use this instead of
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription#toString()}.
* Returns a single line (no \n) representation of snapshot metadata. Use this instead of the
* {@code toString} method of
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}.
* We don't replace
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}'s
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}'s
* {@code toString}, because it is auto-generated by protoc.
*
* @param snapshot description of the snapshot
* @return single line string with a summary of the snapshot parameters
*/

@@ -107,10 +107,6 @@
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol-shaded</artifactId>

@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.mapreduce.ExportUtils;
import org.apache.hadoop.hbase.mapreduce.Import;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
import org.apache.hadoop.hbase.security.User;

@@ -82,6 +81,8 @@ import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos;

@Category({MediumTests.class})
public class TestSecureExport {
@ClassRule

@@ -91,10 +91,6 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>

@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.types;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
import org.apache.hadoop.hbase.example.protobuf.generated.CellMessage;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.yetus.audience.InterfaceAudience;

@@ -28,15 +28,15 @@ import org.apache.yetus.audience.InterfaceAudience;
* An example for using protobuf objects with {@link DataType} API.
*/
@InterfaceAudience.Private
public class PBCell extends PBType<CellProtos.Cell> {
public class PBCell extends PBType<CellMessage.Cell> {
@Override
public Class<CellProtos.Cell> encodedClass() {
return CellProtos.Cell.class;
public Class<CellMessage.Cell> encodedClass() {
return CellMessage.Cell.class;
}

@Override
public int skip(PositionedByteRange src) {
CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder();
CellMessage.Cell.Builder builder = CellMessage.Cell.newBuilder();
CodedInputStream is = inputStreamFromByteRange(src);
is.setSizeLimit(src.getLength());
try {

@@ -50,12 +50,12 @@ public class PBCell extends PBType<CellProtos.Cell> {
}

@Override
public CellProtos.Cell decode(PositionedByteRange src) {
CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder();
public CellMessage.Cell decode(PositionedByteRange src) {
CellMessage.Cell.Builder builder = CellMessage.Cell.newBuilder();
CodedInputStream is = inputStreamFromByteRange(src);
is.setSizeLimit(src.getLength());
try {
CellProtos.Cell ret = builder.mergeFrom(is).build();
CellMessage.Cell ret = builder.mergeFrom(is).build();
src.setPosition(src.getPosition() + is.getTotalBytesRead());
return ret;
} catch (IOException e) {

@@ -64,7 +64,7 @@ public class PBCell extends PBType<CellProtos.Cell> {
}

@Override
public int encode(PositionedByteRange dst, CellProtos.Cell val) {
public int encode(PositionedByteRange dst, CellMessage.Cell val) {
CodedOutputStream os = outputStreamFromByteRange(dst);
try {
int before = os.spaceLeft(), after, written;

@@ -25,8 +25,7 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.yetus.audience.InterfaceAudience;

/**
* A base-class for {@link DataType} implementations backed by protobuf. See
* {@code PBKeyValue} in {@code hbase-examples} module.
* A base-class for {@link DataType} implementations backed by protobuf. See {@link PBCell}.
*/
@InterfaceAudience.Private
public abstract class PBType<T extends Message> implements DataType<T> {

@@ -58,7 +57,8 @@ public abstract class PBType<T extends Message> implements DataType<T> {
/**
* Create a {@link CodedInputStream} from a {@link PositionedByteRange}. Be sure to update
* {@code src}'s position after consuming from the stream.
* <p>For example:
* <p/>
* For example:
* <pre>
* Foo.Builder builder = ...
* CodedInputStream is = inputStreamFromByteRange(src);

@@ -67,16 +67,15 @@ public abstract class PBType<T extends Message> implements DataType<T> {
* </pre>
*/
public static CodedInputStream inputStreamFromByteRange(PositionedByteRange src) {
return CodedInputStream.newInstance(
src.getBytes(),
src.getOffset() + src.getPosition(),
return CodedInputStream.newInstance(src.getBytes(), src.getOffset() + src.getPosition(),
src.getRemaining());
}

/**
* Create a {@link CodedOutputStream} from a {@link PositionedByteRange}. Be sure to update
* {@code dst}'s position after writing to the stream.
* <p>For example:
* <p/>
* For example:
* <pre>
* CodedOutputStream os = outputStreamFromByteRange(dst);
* int before = os.spaceLeft(), after, written;

@@ -87,10 +86,7 @@ public abstract class PBType<T extends Message> implements DataType<T> {
* </pre>
*/
public static CodedOutputStream outputStreamFromByteRange(PositionedByteRange dst) {
return CodedOutputStream.newInstance(
dst.getBytes(),
dst.getOffset() + dst.getPosition(),
dst.getRemaining()
);
return CodedOutputStream.newInstance(dst.getBytes(), dst.getOffset() + dst.getPosition(),
dst.getRemaining());
}
}

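As a usage note, the codec defined by PBCell above round-trips a message as follows; this mirrors the TestPBCell change later in this diff, so nothing here is new API:

// Round-trip one CellMessage.Cell through the PBCell codec (mirrors TestPBCell).
CellMessage.Cell cell =
    CellMessage.Cell.newBuilder().setRow(ByteString.copyFromUtf8("row")).build();
PositionedByteRange pbr = new SimplePositionedByteRange(cell.getSerializedSize());
PBCell codec = new PBCell();
int encodedLength = codec.encode(pbr, cell);  // advances pbr past the bytes written
pbr.setPosition(0);
CellMessage.Cell decoded = codec.decode(pbr); // advances pbr past the bytes read
assert encodedLength == pbr.getPosition();
assert "row".equals(decoded.getRow().toStringUtf8());
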
@@ -16,11 +16,8 @@
* limitations under the License.
*/
syntax = "proto2";
package org.apache.hadoop.hbase.example.protobuf.generated;

option java_package = "org.apache.hadoop.hbase.ipc.protobuf.generated";
option java_outer_classname = "TestProcedureProtos";
option java_generic_services = true;

message TestTableDDLStateData {
required string table_name = 1;
message Cell {
optional bytes row = 1;
}

@@ -18,19 +18,12 @@
package org.apache.hadoop.hbase.types;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
import org.apache.hadoop.hbase.example.protobuf.generated.CellMessage;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedByteRange;
import org.junit.ClassRule;

@@ -41,8 +34,7 @@ import org.junit.experimental.categories.Category;
public class TestPBCell {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestPBCell.class);
public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestPBCell.class);

private static final PBCell CODEC = new PBCell();

@@ -51,16 +43,14 @@ public class TestPBCell {
*/
@Test
public void testRoundTrip() {
final Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
Bytes.toBytes("qual"), Bytes.toBytes("val"));
CellProtos.Cell c = ProtobufUtil.toCell(cell), decoded;
PositionedByteRange pbr = new SimplePositionedByteRange(c.getSerializedSize());
CellMessage.Cell cell =
CellMessage.Cell.newBuilder().setRow(ByteString.copyFromUtf8("row")).build();
PositionedByteRange pbr = new SimplePositionedByteRange(cell.getSerializedSize());
pbr.setPosition(0);
int encodedLength = CODEC.encode(pbr, c);
int encodedLength = CODEC.encode(pbr, cell);
pbr.setPosition(0);
decoded = CODEC.decode(pbr);
CellMessage.Cell decoded = CODEC.decode(pbr);
assertEquals(encodedLength, pbr.getPosition());
assertTrue(CellUtil.equals(cell, ProtobufUtil
.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), decoded)));
assertEquals("row", decoded.getRow().toStringUtf8());
}
}

@@ -159,10 +159,6 @@
<artifactId>hbase-common</artifactId>
<type>jar</type>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol-shaded</artifactId>

@@ -105,18 +105,6 @@
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<!--Needed by ExportSnapshot. It is reading
Snapshot protos. TODO: Move to internal types.-->
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
</dependency>
<dependency>
<!--Needed by ExportSnapshot. It is reading
Snapshot protos. TODO: Move to internal types.-->
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol-shaded</artifactId>

@@ -805,7 +805,6 @@ public class TableMapReduceUtil {
addDependencyJarsForClasses(conf,
// explicitly pull a class from each module
org.apache.hadoop.hbase.HConstants.class, // hbase-common
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class, // hbase-protocol
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, // hbase-protocol-shaded
org.apache.hadoop.hbase.client.Put.class, // hbase-client
org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server

@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;

@@ -75,6 +74,8 @@ import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;

@Category({MapReduceTests.class, LargeTests.class})
public class TestImportTSVWithVisibilityLabels implements Configurable {

@@ -1,13 +0,0 @@
ON PROTOBUFS
This maven module has core protobuf definition files ('.protos') used by hbase
Coprocessor Endpoints that ship with hbase core including tests. Coprocessor
Endpoints are meant to be standalone, independent code not reliant on hbase
internals. They define their Service using protobuf. The protobuf version
they use can be distinct from that used by HBase internally since HBase started
shading its protobuf references. Endpoints have no access to the shaded protobuf
hbase uses. They do have access to the content of hbase-protocol -- the
.protos found in here -- but avoid using as much of this as you can as it is
liable to change.

Generation of java files from protobuf .proto files included here is done as
part of the build.

@@ -1,216 +0,0 @@
<?xml version="1.0"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<version>3.0.0-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>
<artifactId>hbase-protocol</artifactId>
<name>Apache HBase - Protocol</name>
<description>Protobuf protocol classes used by HBase to communicate.</description>
<properties>
<maven.javadoc.skip>true</maven.javadoc.skip>
</properties>
<build>
<plugins>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<!-- Always skip the second part executions, since we only run simple unit tests in this module -->
<executions>
<execution>
<id>secondPartTestsExecution</id>
<phase>test</phase>
<goals>
<goal>test</goal>
</goals>
<configuration>
<skip>true</skip>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.xolstice.maven.plugins</groupId>
<artifactId>protobuf-maven-plugin</artifactId>
<executions>
<execution>
<id>compile-protoc</id>
<phase>generate-sources</phase>
<goals>
<goal>compile</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.google.code.maven-replacer-plugin</groupId>
<artifactId>replacer</artifactId>
<version>1.5.3</version>
<executions>
<execution>
<phase>process-sources</phase>
<goals>
<goal>replace</goal>
</goals>
</execution>
</executions>
<configuration>
<basedir>${basedir}/target/generated-sources/</basedir>
<includes>
<include>**/*.java</include>
</includes>
<!-- Ignore errors when missing files, because it means this build
was run with -Dprotoc.skip and there is no -Dreplacer.skip -->
<ignoreErrors>true</ignoreErrors>
<replacements>
<replacement>
<token>(public)(\W+static)?(\W+final)?(\W+class)</token>
<value>@javax.annotation.Generated("proto") $1$2$3$4</value>
</replacement>
<!-- replacer doesn't support anchoring or negative lookbehind -->
<replacement>
<token>(@javax.annotation.Generated\("proto"\) ){2}</token>
<value>$1</value>
</replacement>
</replacements>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
<dependencies>
<!-- General dependencies -->
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
</dependencies>
<profiles>
<!-- Skip the tests in this module -->
<profile>
<id>skipProtocolTests</id>
<activation>
<property>
<name>skipProtocolTests</name>
</property>
</activation>
<properties>
<surefire.skipFirstPart>true</surefire.skipFirstPart>
<surefire.skipSecondPart>true</surefire.skipSecondPart>
</properties>
</profile>
<profile>
<id>build-with-jdk11</id>
<activation>
<jdk>[1.11,)</jdk>
</activation>
<dependencies>
<dependency>
<groupId>javax.annotation</groupId>
<artifactId>javax.annotation-api</artifactId>
</dependency>
</dependencies>
</profile>
<profile>
<id>eclipse-specific</id>
<activation>
<property>
<name>m2e.version</name>
</property>
</activation>
<build>
<pluginManagement>
<plugins>
<!--This plugin's configuration is used to store Eclipse m2e settings only. It has no influence on the Maven build itself.-->
<plugin>
<groupId>org.eclipse.m2e</groupId>
<artifactId>lifecycle-mapping</artifactId>
<version>1.0.0</version>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
<pluginExecution>
<pluginExecutionFilter>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<versionRange>[2.0.5-alpha,)</versionRange>
<goals>
<goal>protoc</goal>
</goals>
</pluginExecutionFilter>
<action>
<ignore/>
</action>
</pluginExecution>
<pluginExecution>
<pluginExecutionFilter>
<groupId>
com.google.code.maven-replacer-plugin
</groupId>
<artifactId>replacer</artifactId>
<versionRange>[1.5.3,)</versionRange>
<goals>
<goal>replace</goal>
</goals>
</pluginExecutionFilter>
<action>
<ignore></ignore>
</action>
</pluginExecution>
</pluginExecutions>
</lifecycleMappingMetadata>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
</profile>
</profiles>
</project>

@@ -1,77 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.protobuf; // This is a lie.

import org.apache.yetus.audience.InterfaceAudience;

/**
* Helper class to extract byte arrays from {@link ByteString} without copy.
* <p>
* Without this protobufs would force us to copy every single byte array out
* of the objects de-serialized from the wire (which already do one copy, on
* top of the copies the JVM does to go from kernel buffer to C buffer and
* from C buffer to JVM buffer).
*
* @since 0.96.1
*/
@InterfaceAudience.Private
public final class HBaseZeroCopyByteString extends LiteralByteString {
// Gotten from AsyncHBase code base with permission.
/** Private constructor so this class cannot be instantiated. */
private HBaseZeroCopyByteString() {
super(null);
throw new UnsupportedOperationException("Should never be here.");
}

/**
* Wraps a byte array in a {@link ByteString} without copying it.
* @param array array to be wrapped
* @return wrapped array
*/
public static ByteString wrap(final byte[] array) {
return new LiteralByteString(array);
}

/**
* Wraps a subset of a byte array in a {@link ByteString} without copying it.
* @param array array to be wrapped
* @param offset from
* @param length length
* @return wrapped array
*/
public static ByteString wrap(final byte[] array, int offset, int length) {
return new BoundedByteString(array, offset, length);
}

// TODO:
// ZeroCopyLiteralByteString.wrap(this.buf, 0, this.count);

/**
* Extracts the byte array from the given {@link ByteString} without copy.
* @param buf A buffer from which to extract the array. This buffer must be
* actually an instance of a {@code LiteralByteString}.
* @return byte[] representation
*/
public static byte[] zeroCopyGetBytes(final ByteString buf) {
if (buf instanceof LiteralByteString) {
return ((LiteralByteString) buf).bytes;
}
throw new UnsupportedOperationException("Need a LiteralByteString, got a "
+ buf.getClass().getName());
}
}

@@ -1,69 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;

import com.google.protobuf.ByteString;
import com.google.protobuf.HBaseZeroCopyByteString;

import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Hack to workaround HBASE-10304 issue that keeps bubbling up when a mapreduce context.
*/
@InterfaceAudience.Private
public final class ByteStringer {
private static final Logger LOG = LoggerFactory.getLogger(ByteStringer.class);

/**
* Flag set at class loading time.
*/
private static boolean USE_ZEROCOPYBYTESTRING = true;

// Can I classload HBaseZeroCopyByteString without IllegalAccessError?
// If we can, use it passing ByteStrings to pb else use native ByteString though more costly
// because it makes a copy of the passed in array.
static {
try {
HBaseZeroCopyByteString.wrap(new byte [0]);
} catch (IllegalAccessError iae) {
USE_ZEROCOPYBYTESTRING = false;
LOG.debug("Failed to classload HBaseZeroCopyByteString: " + iae.toString());
}
}

private ByteStringer() {
super();
}

/**
* Wraps a byte array in a {@link ByteString} without copying it.
*/
public static ByteString wrap(final byte[] array) {
return USE_ZEROCOPYBYTESTRING? HBaseZeroCopyByteString.wrap(array): ByteString.copyFrom(array);
}

/**
* Wraps a subset of a byte array in a {@link ByteString} without copying it.
*/
public static ByteString wrap(final byte[] array, int offset, int length) {
return USE_ZEROCOPYBYTESTRING? HBaseZeroCopyByteString.wrap(array, offset, length):
ByteString.copyFrom(array, offset, length);
}
}

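The package-private trick deleted above predates protobuf 3, which exposes zero-copy wrapping publicly. A hedged sketch of the modern equivalent, assuming protobuf-java 3.x on the classpath (HBase internals use the relocated copy of the same class under the org.apache.hbase.thirdparty shading):

// Sketch, assuming protobuf-java 3.x on the classpath.
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;

byte[] array = "example".getBytes(java.nio.charset.StandardCharsets.UTF_8);
ByteString whole = UnsafeByteOperations.unsafeWrap(array);                  // no copy
ByteString slice = UnsafeByteOperations.unsafeWrap(array, 0, array.length); // no copy
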
@@ -1,140 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "AccessControlProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";

message Permission {
enum Action {
READ = 0;
WRITE = 1;
EXEC = 2;
CREATE = 3;
ADMIN = 4;
}
enum Type {
Global = 1;
Namespace = 2;
Table = 3;
}
required Type type = 1;
optional GlobalPermission global_permission = 2;
optional NamespacePermission namespace_permission = 3;
optional TablePermission table_permission = 4;
}

message TablePermission {
optional TableName table_name = 1;
optional bytes family = 2;
optional bytes qualifier = 3;
repeated Permission.Action action = 4;
}

message NamespacePermission {
optional bytes namespace_name = 1;
repeated Permission.Action action = 2;
}

message GlobalPermission {
repeated Permission.Action action = 1;
}

message UserPermission {
required bytes user = 1;
required Permission permission = 3;
}

/**
* Content of the /hbase/acl/<table or namespace> znode.
*/
message UsersAndPermissions {
message UserPermissions {
required bytes user = 1;
repeated Permission permissions = 2;
}

repeated UserPermissions user_permissions = 1;
}

message GrantRequest {
required UserPermission user_permission = 1;
optional bool merge_existing_permissions = 2 [default = false];
}

message GrantResponse {
}

message RevokeRequest {
required UserPermission user_permission = 1;
}

message RevokeResponse {
}

message GetUserPermissionsRequest {
optional Permission.Type type = 1;
optional TableName table_name = 2;
optional bytes namespace_name = 3;
optional bytes column_family = 4;
optional bytes column_qualifier = 5;
optional bytes user_name = 6;
}

message GetUserPermissionsResponse {
repeated UserPermission user_permission = 1;
}

message CheckPermissionsRequest {
repeated Permission permission = 1;
}

message CheckPermissionsResponse {
}

message HasPermissionRequest {
required TablePermission table_permission = 1;
required bytes user_name = 2;
}

message HasPermissionResponse {
optional bool has_permission = 1;
}

service AccessControlService {
rpc Grant(GrantRequest)
returns (GrantResponse);

rpc Revoke(RevokeRequest)
returns (RevokeResponse);

rpc GetUserPermissions(GetUserPermissionsRequest)
returns (GetUserPermissionsResponse);

rpc CheckPermissions(CheckPermissionsRequest)
returns (CheckPermissionsResponse);

rpc HasPermission(HasPermissionRequest)
returns (HasPermissionResponse);
}

@ -1,310 +0,0 @@
|
|||
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are used for Admin service.
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "AdminProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";
import "WAL.proto";

message GetRegionInfoRequest {
  required RegionSpecifier region = 1;
  optional bool compaction_state = 2;
}

message GetRegionInfoResponse {
  required RegionInfo region_info = 1;
  optional CompactionState compaction_state = 2;
  // optional bool DEPRECATED_isRecovering = 3;

  enum CompactionState {
    NONE = 0;
    MINOR = 1;
    MAJOR = 2;
    MAJOR_AND_MINOR = 3;
  }
}

/**
 * Get a list of store files for a set of column families in a particular region.
 * If no column family is specified, get the store files for all column families.
 */
message GetStoreFileRequest {
  required RegionSpecifier region = 1;
  repeated bytes family = 2;
}

message GetStoreFileResponse {
  repeated string store_file = 1;
}

message GetOnlineRegionRequest {
}

message GetOnlineRegionResponse {
  repeated RegionInfo region_info = 1;
}

message OpenRegionRequest {
  repeated RegionOpenInfo open_info = 1;
  // the intended server for this RPC.
  optional uint64 serverStartCode = 2;
  // wall clock time from master
  optional uint64 master_system_time = 5;

  message RegionOpenInfo {
    required RegionInfo region = 1;
    optional uint32 version_of_offline_node = 2;
    repeated ServerName favored_nodes = 3;
    // open region for distributedLogReplay
    // optional bool DEPRECATED_openForDistributedLogReplay = 4;
  }
}

message OpenRegionResponse {
  repeated RegionOpeningState opening_state = 1;

  enum RegionOpeningState {
    OPENED = 0;
    ALREADY_OPENED = 1;
    FAILED_OPENING = 2;
  }
}

message WarmupRegionRequest {

  required RegionInfo regionInfo = 1;
}

message WarmupRegionResponse {
}

/**
 * Closes the specified region and will use or not use ZK during the close
 * according to the specified flag.
 */
message CloseRegionRequest {
  required RegionSpecifier region = 1;
  optional uint32 version_of_closing_node = 2;
  optional bool transition_in_ZK = 3 [default = true];
  optional ServerName destination_server = 4;
  // the intended server for this RPC.
  optional uint64 serverStartCode = 5;
}

message CloseRegionResponse {
  required bool closed = 1;
}

/**
 * Flushes the MemStore of the specified region.
 * <p>
 * This method is synchronous.
 */
message FlushRegionRequest {
  required RegionSpecifier region = 1;
  optional uint64 if_older_than_ts = 2;
  optional bool write_flush_wal_marker = 3; // whether to write a marker to WAL even if not flushed
}

message FlushRegionResponse {
  required uint64 last_flush_time = 1;
  optional bool flushed = 2;
  optional bool wrote_flush_wal_marker = 3;
}

/**
 * Splits the specified region.
 * <p>
 * This method currently flushes the region and then forces a compaction which
 * will then trigger a split. The flush is done synchronously but the
 * compaction is asynchronous.
 */
message SplitRegionRequest {
  required RegionSpecifier region = 1;
  optional bytes split_point = 2;
}

message SplitRegionResponse {
}

/**
 * Compacts the specified region. Performs a major compaction if specified.
 * <p>
 * This method is asynchronous.
 */
message CompactRegionRequest {
  required RegionSpecifier region = 1;
  optional bool major = 2;
  optional bytes family = 3;
}

message CompactRegionResponse {
}

message UpdateFavoredNodesRequest {
  repeated RegionUpdateInfo update_info = 1;

  message RegionUpdateInfo {
    required RegionInfo region = 1;
    repeated ServerName favored_nodes = 2;
  }
}

message UpdateFavoredNodesResponse {
  optional uint32 response = 1;
}

/**
 * Merges the specified regions.
 * <p>
 * This method currently closes the regions and then merges them
 */
message MergeRegionsRequest {
  required RegionSpecifier region_a = 1;
  required RegionSpecifier region_b = 2;
  optional bool forcible = 3 [default = false];
  // wall clock time from master
  optional uint64 master_system_time = 4;
}

message MergeRegionsResponse {
}

// Protocol buffer version of WAL for replication
message WALEntry {
  required WALKey key = 1;
  // Following may be null if the KVs/Cells are carried along the side in a cellblock (See
  // RPC for more on cellblocks). If Cells/KVs are in a cellblock, this next field is null
  // and associated_cell_count has count of Cells associated w/ this WALEntry
  repeated bytes key_value_bytes = 2;
  // If Cell data is carried alongside in a cellblock, this is count of Cells in the cellblock.
  optional int32 associated_cell_count = 3;
}

/**
 * Replicates the given entries. The guarantee is that the given entries
 * will be durable on the slave cluster if this method returns without
 * any exception.
 */
message ReplicateWALEntryRequest {
  repeated WALEntry entry = 1;
  optional string replicationClusterId = 2;
  optional string sourceBaseNamespaceDirPath = 3;
  optional string sourceHFileArchiveDirPath = 4;
}

message ReplicateWALEntryResponse {
}

message RollWALWriterRequest {
}

/*
 * Roll request responses no longer include regions to flush
 * this list will always be empty when talking to a 1.0 server
 */
message RollWALWriterResponse {
  // A list of encoded name of regions to flush
  repeated bytes region_to_flush = 1;
}

message StopServerRequest {
  required string reason = 1;
}

message StopServerResponse {
}

message GetServerInfoRequest {
}

message ServerInfo {
  required ServerName server_name = 1;
  optional uint32 webui_port = 2;
}

message GetServerInfoResponse {
  required ServerInfo server_info = 1;
}

message UpdateConfigurationRequest {
}

message UpdateConfigurationResponse {
}

service AdminService {
  rpc GetRegionInfo(GetRegionInfoRequest)
    returns(GetRegionInfoResponse);

  rpc GetStoreFile(GetStoreFileRequest)
    returns(GetStoreFileResponse);

  rpc GetOnlineRegion(GetOnlineRegionRequest)
    returns(GetOnlineRegionResponse);

  rpc OpenRegion(OpenRegionRequest)
    returns(OpenRegionResponse);

  rpc WarmupRegion(WarmupRegionRequest)
    returns(WarmupRegionResponse);

  rpc CloseRegion(CloseRegionRequest)
    returns(CloseRegionResponse);

  rpc FlushRegion(FlushRegionRequest)
    returns(FlushRegionResponse);

  rpc SplitRegion(SplitRegionRequest)
    returns(SplitRegionResponse);

  rpc CompactRegion(CompactRegionRequest)
    returns(CompactRegionResponse);

  rpc MergeRegions(MergeRegionsRequest)
    returns(MergeRegionsResponse);

  rpc ReplicateWALEntry(ReplicateWALEntryRequest)
    returns(ReplicateWALEntryResponse);

  rpc Replay(ReplicateWALEntryRequest)
    returns(ReplicateWALEntryResponse);

  rpc RollWALWriter(RollWALWriterRequest)
    returns(RollWALWriterResponse);

  rpc GetServerInfo(GetServerInfoRequest)
    returns(GetServerInfoResponse);

  rpc StopServer(StopServerRequest)
    returns(StopServerResponse);

  rpc UpdateFavoredNodes(UpdateFavoredNodesRequest)
    returns(UpdateFavoredNodesResponse);

  rpc UpdateConfiguration(UpdateConfigurationRequest)
    returns(UpdateConfigurationResponse);
}
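Editor's note: the request/response pairs above were consumed through the protobuf-java builders generated into the AdminProtos outer class. A minimal sketch of assembling a GetRegionInfoRequest, assuming the generated classes and a placeholder region name; not part of this commit:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;

public class GetRegionInfoSketch {
  public static void main(String[] args) {
    // RegionSpecifier is defined in HBase.proto; ENCODED_REGION_NAME selects
    // the hashed form of the region name. "1588230740" is only a placeholder.
    RegionSpecifier region = RegionSpecifier.newBuilder()
        .setType(RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFromUtf8("1588230740"))
        .build();
    // Setting compaction_state asks the server to also report the
    // CompactionState enum in the GetRegionInfoResponse.
    GetRegionInfoRequest request = GetRegionInfoRequest.newBuilder()
        .setRegion(region)
        .setCompactionState(true)
        .build();
    System.out.println(request);
  }
}

Because the file sets java_generic_services = true, the same codegen also produced AdminService stubs whose blocking methods accept these request messages.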
--- a/hbase-protocol/src/main/protobuf/Authentication.proto
+++ /dev/null
@@ -1,83 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "AuthenticationProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

message AuthenticationKey {
  required int32 id = 1;
  required int64 expiration_date = 2;
  required bytes key = 3;
}


message TokenIdentifier {
  enum Kind {
    HBASE_AUTH_TOKEN = 0;
  }
  required Kind kind = 1;
  required bytes username = 2;
  required int32 key_id = 3;
  optional int64 issue_date = 4;
  optional int64 expiration_date = 5;
  optional int64 sequence_number = 6;
}


// Serialization of the org.apache.hadoop.security.token.Token class
// Note that this is a Hadoop class, so fields may change!
message Token {
  // the TokenIdentifier in serialized form
  // Note: we can't use the protobuf directly because the Hadoop Token class
  // only stores the serialized bytes
  optional bytes identifier = 1;
  optional bytes password = 2;
  optional bytes service = 3;
}


// RPC request & response messages
message GetAuthenticationTokenRequest {
}

message GetAuthenticationTokenResponse {
  optional Token token = 1;
}

message WhoAmIRequest {
}

message WhoAmIResponse {
  optional string username = 1;
  optional string auth_method = 2;
}


// RPC service
service AuthenticationService {
  rpc GetAuthenticationToken(GetAuthenticationTokenRequest)
    returns (GetAuthenticationTokenResponse);

  rpc WhoAmI(WhoAmIRequest)
    returns (WhoAmIResponse);
}
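Editor's note: the Token message above deliberately carries the TokenIdentifier only in serialized form, since the Hadoop Token class stores raw bytes. A round-trip sketch, assuming the generated AuthenticationProtos class and placeholder values:

import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier;

public class TokenIdentifierSketch {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    // "alice" and key id 42 are illustrative placeholders.
    TokenIdentifier id = TokenIdentifier.newBuilder()
        .setKind(TokenIdentifier.Kind.HBASE_AUTH_TOKEN)
        .setUsername(ByteString.copyFromUtf8("alice"))
        .setKeyId(42)
        .setIssueDate(System.currentTimeMillis())
        .build();
    // These bytes are what the Token.identifier field carries on the wire.
    byte[] wire = id.toByteArray();
    TokenIdentifier parsed = TokenIdentifier.parseFrom(wire);
    System.out.println(parsed.getKeyId()); // 42
  }
}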
--- a/hbase-protocol/src/main/protobuf/Cell.proto
+++ /dev/null
@@ -1,69 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// Cell and KeyValue protos
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "CellProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

/**
 * The type of the key in a Cell
 */
enum CellType {
  MINIMUM = 0;
  PUT = 4;

  DELETE = 8;
  DELETE_FAMILY_VERSION = 10;
  DELETE_COLUMN = 12;
  DELETE_FAMILY = 14;

  // MAXIMUM is used when searching; you look from maximum on down.
  MAXIMUM = 255;
}

/**
 * Protocol buffer version of Cell.
 */
message Cell {
  optional bytes row = 1;
  optional bytes family = 2;
  optional bytes qualifier = 3;
  optional uint64 timestamp = 4;
  optional CellType cell_type = 5;
  optional bytes value = 6;
  optional bytes tags = 7;
}

/**
 * Protocol buffer version of KeyValue.
 * It doesn't have those transient parameters
 */
message KeyValue {
  required bytes row = 1;
  required bytes family = 2;
  required bytes qualifier = 3;
  optional uint64 timestamp = 4;
  optional CellType key_type = 5;
  optional bytes value = 6;
  optional bytes tags = 7;
}
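Editor's note: a minimal sketch of populating the Cell message above through the generated CellProtos builders; row, family, qualifier, and value are placeholders:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;

public class CellSketch {
  public static void main(String[] args) {
    // cell_type PUT corresponds to the KeyValue type code 4 in the enum above.
    CellProtos.Cell cell = CellProtos.Cell.newBuilder()
        .setRow(ByteString.copyFromUtf8("row1"))
        .setFamily(ByteString.copyFromUtf8("cf"))
        .setQualifier(ByteString.copyFromUtf8("q"))
        .setTimestamp(System.currentTimeMillis())
        .setCellType(CellProtos.CellType.PUT)
        .setValue(ByteString.copyFromUtf8("v"))
        .build();
    System.out.println(cell.getSerializedSize());
  }
}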
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ /dev/null
@@ -1,550 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are used for Client service.
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "ClientProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";
import "Filter.proto";
import "Cell.proto";
import "Comparator.proto";
import "MapReduce.proto";

/**
 * The protocol buffer version of Authorizations.
 */
message Authorizations {
  repeated string label = 1;
}

/**
 * The protocol buffer version of CellVisibility.
 */
message CellVisibility {
  required string expression = 1;
}

/**
 * Container for a list of column qualifier names of a family.
 */
message Column {
  required bytes family = 1;
  repeated bytes qualifier = 2;
}

/**
 * Consistency defines the expected consistency level for an operation.
 */
enum Consistency {
  STRONG = 0;
  TIMELINE = 1;
}

/**
 * The protocol buffer version of Get.
 * Unless existence_only is specified, return all the requested data
 * for the row that matches exactly.
 */
message Get {
  required bytes row = 1;
  repeated Column column = 2;
  repeated NameBytesPair attribute = 3;
  optional Filter filter = 4;
  optional TimeRange time_range = 5;
  optional uint32 max_versions = 6 [default = 1];
  optional bool cache_blocks = 7 [default = true];
  optional uint32 store_limit = 8;
  optional uint32 store_offset = 9;

  // The result isn't asked for, just check for
  // the existence.
  optional bool existence_only = 10 [default = false];

  // If the row to get doesn't exist, return the
  // closest row before. Deprecated. No longer used!
  // Since hbase-2.0.0.
  optional bool closest_row_before = 11 [default = false];

  optional Consistency consistency = 12 [default = STRONG];
  repeated ColumnFamilyTimeRange cf_time_range = 13;
  optional bool load_column_families_on_demand = 14; /* DO NOT add defaults to load_column_families_on_demand. */
}

message Result {
  // Result includes the Cells or else it just has a count of Cells
  // that are carried otherwise.
  repeated Cell cell = 1;
  // The below count is set when the associated cells are
  // not part of this protobuf message; they are passed alongside
  // and then this Message is just a placeholder with metadata.
  // The count is needed to know how many to peel off the block of Cells as
  // ours. NOTE: This is different from the pb managed cell_count of the
  // 'cell' field above which is non-null when the cells are pb'd.
  optional int32 associated_cell_count = 2;

  // used for Get to check existence only. Not set if existence_only was not set to true
  // in the query.
  optional bool exists = 3;

  // Whether or not the results are coming from possibly stale data
  optional bool stale = 4 [default = false];

  // Whether or not the entire result could be returned. Results will be split when
  // the RPC chunk size limit is reached. Partial results contain only a subset of the
  // cells for a row and must be combined with a result containing the remaining cells
  // to form a complete result. The equivalent flag in o.a.h.h.client.Result is
  // mayHaveMoreCellsInRow.
  optional bool partial = 5 [default = false];
}

/**
 * The get request. Perform a single Get operation.
 */
message GetRequest {
  required RegionSpecifier region = 1;
  required Get get = 2;
}

message GetResponse {
  optional Result result = 1;
}

/**
 * Condition to check if the value of a given cell (row,
 * family, qualifier) matches a value via a given comparator.
 *
 * Condition is used in check and mutate operations.
 */
message Condition {
  required bytes row = 1;
  optional bytes family = 2;
  optional bytes qualifier = 3;
  optional CompareType compare_type = 4;
  optional Comparator comparator = 5;
  optional TimeRange time_range = 6;
  optional Filter filter = 7;
}


/**
 * A specific mutation inside a mutate request.
 * It can be an append, increment, put or delete based
 * on the mutation type. It can be fully filled in or
 * only metadata present because data is being carried
 * elsewhere outside of pb.
 */
message MutationProto {
  optional bytes row = 1;
  optional MutationType mutate_type = 2;
  repeated ColumnValue column_value = 3;
  optional uint64 timestamp = 4;
  repeated NameBytesPair attribute = 5;
  optional Durability durability = 6 [default = USE_DEFAULT];

  // For some mutations, a result may be returned, in which case,
  // time range can be specified for potential performance gain
  optional TimeRange time_range = 7;
  // The below count is set when the associated cells are NOT
  // part of this protobuf message; they are passed alongside
  // and then this Message is a placeholder with metadata. The
  // count is needed to know how many to peel off the block of Cells as
  // ours. NOTE: This is different from the pb managed cell_count of the
  // 'cell' field above which is non-null when the cells are pb'd.
  optional int32 associated_cell_count = 8;

  optional uint64 nonce = 9;

  enum Durability {
    USE_DEFAULT = 0;
    SKIP_WAL = 1;
    ASYNC_WAL = 2;
    SYNC_WAL = 3;
    FSYNC_WAL = 4;
  }

  enum MutationType {
    APPEND = 0;
    INCREMENT = 1;
    PUT = 2;
    DELETE = 3;
  }

  enum DeleteType {
    DELETE_ONE_VERSION = 0;
    DELETE_MULTIPLE_VERSIONS = 1;
    DELETE_FAMILY = 2;
    DELETE_FAMILY_VERSION = 3;
  }

  message ColumnValue {
    required bytes family = 1;
    repeated QualifierValue qualifier_value = 2;

    message QualifierValue {
      optional bytes qualifier = 1;
      optional bytes value = 2;
      optional uint64 timestamp = 3;
      optional DeleteType delete_type = 4;
      optional bytes tags = 5;
    }
  }
}

/**
 * The mutate request. Perform a single Mutate operation.
 *
 * Optionally, you can specify a condition. The mutate
 * will take place only if the condition is met. Otherwise,
 * the mutate will be ignored. In the response result,
 * parameter processed is used to indicate if the mutate
 * actually happened.
 */
message MutateRequest {
  required RegionSpecifier region = 1;
  required MutationProto mutation = 2;
  optional Condition condition = 3;
  optional uint64 nonce_group = 4;
}

message MutateResponse {
  optional Result result = 1;

  // used for mutate to indicate processed only
  optional bool processed = 2;
}

/**
 * Instead of get from a table, you can scan it with optional filters.
 * You can specify the row key range, time range, the columns/families
 * to scan and so on.
 *
 * This scan is used the first time in a scan request. The response of
 * the initial scan will return a scanner id, which should be used to
 * fetch result batches later on before it is closed.
 */
message Scan {
  repeated Column column = 1;
  repeated NameBytesPair attribute = 2;
  optional bytes start_row = 3;
  optional bytes stop_row = 4;
  optional Filter filter = 5;
  optional TimeRange time_range = 6;
  optional uint32 max_versions = 7 [default = 1];
  optional bool cache_blocks = 8 [default = true];
  optional uint32 batch_size = 9;
  optional uint64 max_result_size = 10;
  optional uint32 store_limit = 11;
  optional uint32 store_offset = 12;
  optional bool load_column_families_on_demand = 13; /* DO NOT add defaults to load_column_families_on_demand. */
  optional bool small = 14 [deprecated = true];
  optional bool reversed = 15 [default = false];
  optional Consistency consistency = 16 [default = STRONG];
  optional uint32 caching = 17;
  optional bool allow_partial_results = 18;
  repeated ColumnFamilyTimeRange cf_time_range = 19;
  optional uint64 mvcc_read_point = 20 [default = 0];
  optional bool include_start_row = 21 [default = true];
  optional bool include_stop_row = 22 [default = false];
  enum ReadType {
    DEFAULT = 0;
    STREAM = 1;
    PREAD = 2;
  }
  optional ReadType readType = 23 [default = DEFAULT];
  optional bool need_cursor_result = 24 [default = false];
}

/**
 * A scan request. Initially, it should specify a scan. Later on, you
 * can use the scanner id returned to fetch result batches with a different
 * scan request.
 *
 * The scanner will remain open if there are more results, and it's not
 * asked to be closed explicitly.
 *
 * You can fetch the results and ask the scanner to be closed to save
 * a trip if you are not interested in remaining results.
 */
message ScanRequest {
  optional RegionSpecifier region = 1;
  optional Scan scan = 2;
  optional uint64 scanner_id = 3;
  optional uint32 number_of_rows = 4;
  optional bool close_scanner = 5;
  optional uint64 next_call_seq = 6;
  optional bool client_handles_partials = 7;
  optional bool client_handles_heartbeats = 8;
  optional bool track_scan_metrics = 9;
  optional bool renew = 10 [default = false];
  // if we have returned limit_of_rows rows to client, then close the scanner.
  optional uint32 limit_of_rows = 11 [default = 0];
}

/**
 * Scan cursor to tell client where we are scanning.
 */
message Cursor {
  optional bytes row = 1;
}

/**
 * The scan response. If there are no more results, more_results will
 * be false. If it is not specified, it means there are more.
 */
message ScanResponse {
  // This field is filled in if we are doing cellblocks. A cellblock is made up
  // of all Cells serialized out as one cellblock BUT responses from a server
  // have their Cells grouped by Result. So we can reconstitute the
  // Results on the client-side, this field is a list of counts of Cells
  // in each Result that makes up the response. For example, if this field
  // has 3, 3, 3 in it, then we know that on the client, we are to make
  // three Results each of three Cells each.
  repeated uint32 cells_per_result = 1;

  optional uint64 scanner_id = 2;
  optional bool more_results = 3;
  optional uint32 ttl = 4;
  // If cells are not carried in an accompanying cellblock, then they are pb'd here.
  // This field is mutually exclusive with cells_per_result (since the Cells will
  // be inside the pb'd Result)
  repeated Result results = 5;
  optional bool stale = 6;

  // This field is filled in if we are doing cellblocks. In the event that a row
  // could not fit all of its cells into a single RPC chunk, the results will be
  // returned as partials, and reconstructed into a complete result on the client
  // side. This field is a list of flags indicating whether or not the result
  // that the cells belong to is a partial result. For example, if this field
  // has false, false, true in it, then we know that on the client side, we need to
  // make another RPC request since the last result was only a partial.
  repeated bool partial_flag_per_result = 7;

  // A server may choose to limit the number of results returned to the client for
  // reasons such as the size in bytes or quantity of results accumulated. This field
  // will be true when more results exist in the current region.
  optional bool more_results_in_region = 8;

  // This field is filled in if the server is sending back a heartbeat message.
  // Heartbeat messages are sent back to the client to prevent the scanner from
  // timing out. Seeing a heartbeat message communicates to the Client that the
  // server would have continued to scan had the time limit not been reached.
  optional bool heartbeat_message = 9;

  // This field is filled in if the client has requested that scan metrics be tracked.
  // The metrics tracked here are sent back to the client to be tracked together with
  // the existing client side metrics.
  optional ScanMetrics scan_metrics = 10;

  // The mvcc read point which is used to open the scanner at server side. Client can
  // make use of this mvcc_read_point when restarting a scanner to get a consistent view
  // of a row.
  optional uint64 mvcc_read_point = 11 [default = 0];

  // If the Scan needs a cursor, return the row key we are scanning in the heartbeat message.
  // If the Scan doesn't need a cursor, don't set this field to reduce network IO.
  optional Cursor cursor = 12;
}

/**
 * Atomically bulk load multiple HFiles (say from different column families)
 * into an open region.
 */
message BulkLoadHFileRequest {
  required RegionSpecifier region = 1;
  repeated FamilyPath family_path = 2;
  optional bool assign_seq_num = 3;
  optional DelegationToken fs_token = 4;
  optional string bulk_token = 5;
  optional bool copy_file = 6 [default = false];

  message FamilyPath {
    required bytes family = 1;
    required string path = 2;
  }
}

message BulkLoadHFileResponse {
  required bool loaded = 1;
}

message DelegationToken {
  optional bytes identifier = 1;
  optional bytes password = 2;
  optional string kind = 3;
  optional string service = 4;
}

message PrepareBulkLoadRequest {
  required TableName table_name = 1;
  optional RegionSpecifier region = 2;
}

message PrepareBulkLoadResponse {
  required string bulk_token = 1;
}

message CleanupBulkLoadRequest {
  required string bulk_token = 1;
  optional RegionSpecifier region = 2;
}

message CleanupBulkLoadResponse {
}

message CoprocessorServiceCall {
  required bytes row = 1;
  required string service_name = 2;
  required string method_name = 3;
  required bytes request = 4;
}

message CoprocessorServiceResult {
  optional NameBytesPair value = 1;
}

message CoprocessorServiceRequest {
  required RegionSpecifier region = 1;
  required CoprocessorServiceCall call = 2;
}

message CoprocessorServiceResponse {
  required RegionSpecifier region = 1;
  required NameBytesPair value = 2;
}

// Either a Get or a Mutation
message Action {
  // If part of a multi action, useful for aligning the
  // result with what was originally submitted.
  optional uint32 index = 1;
  optional MutationProto mutation = 2;
  optional Get get = 3;
  optional CoprocessorServiceCall service_call = 4;
}

/**
 * Actions to run against a Region.
 */
message RegionAction {
  required RegionSpecifier region = 1;
  // When set, run mutations as atomic unit.
  optional bool atomic = 2;
  repeated Action action = 3;
}

/*
 * Statistics about the current load on the region
 */
message RegionLoadStats {
  // Percent load on the memstore. Guaranteed to be positive, between 0 and 100.
  optional int32 memStoreLoad = 1 [default = 0];
  // Percent JVM heap occupancy. Guaranteed to be positive, between 0 and 100.
  // We can move this to "ServerLoadStats" should we develop them.
  optional int32 heapOccupancy = 2 [default = 0];
  // Compaction pressure. Guaranteed to be positive, between 0 and 100.
  optional int32 compactionPressure = 3 [default = 0];
}

message MultiRegionLoadStats {
  repeated RegionSpecifier region = 1;
  repeated RegionLoadStats stat = 2;
}

/**
 * Either a Result or an Exception NameBytesPair (keyed by
 * exception name whose value is the exception stringified)
 * or maybe empty if no result and no exception.
 */
message ResultOrException {
  // If part of a multi call, save original index of the list of all
  // passed so can align this response w/ original request.
  optional uint32 index = 1;
  optional Result result = 2;
  optional NameBytesPair exception = 3;
  // result if this was a coprocessor service call
  optional CoprocessorServiceResult service_result = 4;
  // current load on the region
  optional RegionLoadStats loadStats = 5 [deprecated=true];
}

/**
 * The result of a RegionAction.
 */
message RegionActionResult {
  repeated ResultOrException resultOrException = 1;
  // If the operation failed globally for this region, this exception is set
  optional NameBytesPair exception = 2;
}

/**
 * Execute a list of actions on a given region in order.
 * Nothing prevents a request from containing a set of RegionActions on the same region.
 * For this reason, the matching between the MultiRequest and the MultiResponse is not
 * done by the region specifier but by keeping the order of the RegionActionResult vs.
 * the order of the RegionAction.
 */
message MultiRequest {
  repeated RegionAction regionAction = 1;
  optional uint64 nonceGroup = 2;
  optional Condition condition = 3;
}

message MultiResponse {
  repeated RegionActionResult regionActionResult = 1;
  // used for mutate to indicate processed only
  optional bool processed = 2;
  optional MultiRegionLoadStats regionStatistics = 3;
}


service ClientService {
  rpc Get(GetRequest)
    returns(GetResponse);

  rpc Mutate(MutateRequest)
    returns(MutateResponse);

  rpc Scan(ScanRequest)
    returns(ScanResponse);

  rpc BulkLoadHFile(BulkLoadHFileRequest)
    returns(BulkLoadHFileResponse);

  rpc PrepareBulkLoad(PrepareBulkLoadRequest)
    returns (PrepareBulkLoadResponse);

  rpc CleanupBulkLoad(CleanupBulkLoadRequest)
    returns (CleanupBulkLoadResponse);

  rpc ExecService(CoprocessorServiceRequest)
    returns(CoprocessorServiceResponse);

  rpc ExecRegionServerService(CoprocessorServiceRequest)
    returns(CoprocessorServiceResponse);

  rpc Multi(MultiRequest)
    returns(MultiResponse);
}
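Editor's note: the existence_only flag above turns a Get into a pure "exists" probe, answered through Result.exists rather than shipped Cells. A minimal sketch assembling such a request, assuming the generated ClientProtos and HBaseProtos classes and placeholder names:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;

public class ExistsGetSketch {
  public static void main(String[] args) {
    // REGION_NAME addresses a region by its full name; the value is a placeholder.
    RegionSpecifier region = RegionSpecifier.newBuilder()
        .setType(RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFromUtf8("placeholder-region-name"))
        .build();
    // existence_only = true: no cells come back, only Result.exists.
    ClientProtos.Get get = ClientProtos.Get.newBuilder()
        .setRow(ByteString.copyFromUtf8("row1"))
        .setExistenceOnly(true)
        .build();
    ClientProtos.GetRequest request = ClientProtos.GetRequest.newBuilder()
        .setRegion(region)
        .setGet(get)
        .build();
    System.out.println(request);
  }
}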
--- a/hbase-protocol/src/main/protobuf/ClusterId.proto
+++ /dev/null
@@ -1,35 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are shared throughout HBase
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "ClusterIdProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

/**
 * Content of the '/hbase/hbaseid', cluster id, znode.
 * Also the content of the ${HBASE_ROOTDIR}/hbase.id file.
 */
message ClusterId {
  // This is the cluster id, a uuid as a String
  required string cluster_id = 1;
}
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ /dev/null
@@ -1,283 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are used for ClusterStatus
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "ClusterStatusProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";
import "ClusterId.proto";
import "FS.proto";

message RegionState {
  required RegionInfo region_info = 1;
  required State state = 2;
  optional uint64 stamp = 3;
  enum State {
    OFFLINE = 0;        // region is in an offline state
    PENDING_OPEN = 1;   // sent rpc to server to open but has not begun
    OPENING = 2;        // server has begun to open but not yet done
    OPEN = 3;           // server opened region and updated meta
    PENDING_CLOSE = 4;  // sent rpc to server to close but has not begun
    CLOSING = 5;        // server has begun to close but not yet done
    CLOSED = 6;         // server closed region and updated meta
    SPLITTING = 7;      // server started split of a region
    SPLIT = 8;          // server completed split of a region
    FAILED_OPEN = 9;    // failed to open, and won't retry any more
    FAILED_CLOSE = 10;  // failed to close, and won't retry any more
    MERGING = 11;       // server started merge of a region
    MERGED = 12;        // server completed merge of a region
    SPLITTING_NEW = 13; // new region to be created when RS splits a parent
                        // region but hasn't been created yet, or master doesn't
                        // know it's already created
    MERGING_NEW = 14;   // new region to be created when RS merges two
                        // daughter regions but hasn't been created yet, or
                        // master doesn't know it's already created
  }
}

message RegionInTransition {
  required RegionSpecifier spec = 1;
  required RegionState region_state = 2;
}

/**
 * sequence Id of a store
 */
message StoreSequenceId {
  required bytes family_name = 1;
  required uint64 sequence_id = 2;
}

/**
 * contains a sequence id of a region which should be the minimum of its store sequence ids and
 * list of sequence ids of the region's stores
 */
message RegionStoreSequenceIds {
  required uint64 last_flushed_sequence_id = 1;
  repeated StoreSequenceId store_sequence_id = 2;
}

message RegionLoad {
  /** the region specifier */
  required RegionSpecifier region_specifier = 1;

  /** the number of stores for the region */
  optional uint32 stores = 2;

  /** the number of storefiles for the region */
  optional uint32 storefiles = 3;

  /** the total size of the store files for the region, uncompressed, in MB */
  optional uint32 store_uncompressed_size_MB = 4;

  /** the current total size of the store files for the region, in MB */
  optional uint32 storefile_size_MB = 5;

  /** the current size of the memstore for the region, in MB */
  optional uint32 memstore_size_MB = 6;

  /**
   * The current total size of root-level store file indexes for the region,
   * in KB. The same as {@link #rootIndexSizeKB}.
   */
  optional uint64 storefile_index_size_KB = 7;

  /** the current total read requests made to region */
  optional uint64 read_requests_count = 8;

  /** the current total write requests made to region */
  optional uint64 write_requests_count = 9;

  /** the total compacting key values in currently running compaction */
  optional uint64 total_compacting_KVs = 10;

  /** the completed count of key values in currently running compaction */
  optional uint64 current_compacted_KVs = 11;

  /** The current total size of root-level indexes for the region, in KB. */
  optional uint32 root_index_size_KB = 12;

  /** The total size of all index blocks, not just the root level, in KB. */
  optional uint32 total_static_index_size_KB = 13;

  /**
   * The total size of all Bloom filter blocks, not just loaded into the
   * block cache, in KB.
   */
  optional uint32 total_static_bloom_size_KB = 14;

  /** the most recent sequence Id from cache flush */
  optional uint64 complete_sequence_id = 15;

  /** The current data locality for region in the regionserver */
  optional float data_locality = 16;

  optional uint64 last_major_compaction_ts = 17 [default = 0];

  /** the most recent sequence Id of store from cache flush */
  repeated StoreSequenceId store_complete_sequence_id = 18;

  /** the current total filtered read requests made to region */
  optional uint64 filtered_read_requests_count = 19;

  /** the current total coprocessor requests made to region */
  optional uint64 cp_requests_count = 20;

  /** the number of references active on the store */
  optional int32 store_ref_count = 21 [default = 0];

  /**
   * The max number of references active on single store file among all compacted store files
   * that belong to given region
   */
  optional int32 max_compacted_store_file_ref_count = 22 [default = 0];
}

message UserLoad {

  /** short user name */
  required string userName = 1;

  /** Metrics for all clients of a user */
  repeated ClientMetrics clientMetrics = 2;

}

message ClientMetrics {
  /** client host name */
  required string hostName = 1;

  /** the current total read requests made from a client */
  optional uint64 read_requests_count = 2;

  /** the current total write requests made from a client */
  optional uint64 write_requests_count = 3;

  /** the current total filtered requests made from a client */
  optional uint64 filtered_requests_count = 4;
}

/* Server-level protobufs */

message ReplicationLoadSink {
  required uint64 ageOfLastAppliedOp = 1;
  required uint64 timeStampsOfLastAppliedOp = 2;
}

message ReplicationLoadSource {
  required string peerID = 1;
  required uint64 ageOfLastShippedOp = 2;
  required uint32 sizeOfLogQueue = 3;
  required uint64 timeStampOfLastShippedOp = 4;
  required uint64 replicationLag = 5;
}

message ServerLoad {
  /** Number of requests since last report. */
  optional uint64 number_of_requests = 1;

  /** Total Number of requests from the start of the region server. */
  optional uint64 total_number_of_requests = 2;

  /** the amount of used heap, in MB. */
  optional uint32 used_heap_MB = 3;

  /** the maximum allowable size of the heap, in MB. */
  optional uint32 max_heap_MB = 4;

  /** Information on the load of individual regions. */
  repeated RegionLoad region_loads = 5;

  /**
   * Regionserver-level coprocessors, e.g., WALObserver implementations.
   * Region-level coprocessors, on the other hand, are stored inside RegionLoad
   * objects.
   */
  repeated Coprocessor coprocessors = 6;

  /**
   * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
   * time is measured as the difference, measured in milliseconds, between the current time
   * and midnight, January 1, 1970 UTC.
   */
  optional uint64 report_start_time = 7;

  /**
   * Time when report was generated.
   * time is measured as the difference, measured in milliseconds, between the current time
   * and midnight, January 1, 1970 UTC.
   */
  optional uint64 report_end_time = 8;

  /**
   * The port number that this region server is hosting an info server on.
   */
  optional uint32 info_server_port = 9;

  /**
   * The replicationLoadSource for the replication Source status of this region server.
   */
  repeated ReplicationLoadSource replLoadSource = 10;

  /**
   * The replicationLoadSink for the replication Sink status of this region server.
   */
  optional ReplicationLoadSink replLoadSink = 11;

  /**
   * The metrics for each user on this region server
   */
  repeated UserLoad userLoads = 12;
}

message LiveServerInfo {
  required ServerName server = 1;
  required ServerLoad server_load = 2;
}

message ClusterStatus {
  optional HBaseVersionFileContent hbase_version = 1;
  repeated LiveServerInfo live_servers = 2;
  repeated ServerName dead_servers = 3;
  repeated RegionInTransition regions_in_transition = 4;
  optional ClusterId cluster_id = 5;
  repeated Coprocessor master_coprocessors = 6;
  optional ServerName master = 7;
  repeated ServerName backup_masters = 8;
  optional bool balancer_on = 9;
}

enum Option {
  HBASE_VERSION = 0;
  CLUSTER_ID = 1;
  LIVE_SERVERS = 2;
  DEAD_SERVERS = 3;
  MASTER = 4;
  BACKUP_MASTERS = 5;
  MASTER_COPROCESSORS = 6;
  REGIONS_IN_TRANSITION = 7;
  BALANCER_ON = 8;
}
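Editor's note: a reader-side sketch of walking the nested ClusterStatus structure above with the generated accessors (repeated fields get getXxxList, unset optional scalars read as their defaults); the class and method names assume standard protobuf-java codegen:

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

public class ClusterStatusSketch {
  // Sum read_requests_count over every region of every live server.
  static long totalReadRequests(ClusterStatusProtos.ClusterStatus status) {
    long total = 0;
    for (ClusterStatusProtos.LiveServerInfo server : status.getLiveServersList()) {
      for (ClusterStatusProtos.RegionLoad load
          : server.getServerLoad().getRegionLoadsList()) {
        // read_requests_count is optional; the getter returns 0 when unset.
        total += load.getReadRequestsCount();
      }
    }
    return total;
  }
}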
--- a/hbase-protocol/src/main/protobuf/Comparator.proto
+++ /dev/null
@@ -1,84 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are used for comparators (e.g. in filters)
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "ComparatorProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

message Comparator {
  required string name = 1;
  optional bytes serialized_comparator = 2;
}

message ByteArrayComparable {
  optional bytes value = 1;
}

message BinaryComparator {
  required ByteArrayComparable comparable = 1;
}

message LongComparator {
  required ByteArrayComparable comparable = 1;
}

message BinaryPrefixComparator {
  required ByteArrayComparable comparable = 1;
}

message BitComparator {
  required ByteArrayComparable comparable = 1;
  required BitwiseOp bitwise_op = 2;

  enum BitwiseOp {
    AND = 1;
    OR = 2;
    XOR = 3;
  }
}

message NullComparator {
}

message RegexStringComparator {
  required string pattern = 1;
  required int32 pattern_flags = 2;
  required string charset = 3;
  optional string engine = 4;
}

message SubstringComparator {
  required string substr = 1;
}

message BigDecimalComparator {
  required ByteArrayComparable comparable = 1;
}

message BinaryComponentComparator {
  required bytes value = 1;
  required uint32 offset = 2;
}
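Editor's note: concrete comparators travel inside the generic Comparator envelope, a class name plus the concrete message's serialized bytes. A minimal sketch of that wrapping, assuming the generated ComparatorProtos class; the value is a placeholder:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;

public class ComparatorEnvelopeSketch {
  public static void main(String[] args) {
    ComparatorProtos.BinaryComparator binary =
        ComparatorProtos.BinaryComparator.newBuilder()
            .setComparable(ComparatorProtos.ByteArrayComparable.newBuilder()
                .setValue(ByteString.copyFromUtf8("threshold")))
            .build();
    // name carries the Java comparator class, serialized_comparator the
    // concrete message bytes; this mirrors how HBase ships comparators.
    ComparatorProtos.Comparator envelope = ComparatorProtos.Comparator.newBuilder()
        .setName("org.apache.hadoop.hbase.filter.BinaryComparator")
        .setSerializedComparator(binary.toByteString())
        .build();
    System.out.println(envelope.getName());
  }
}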
--- a/hbase-protocol/src/main/protobuf/Encryption.proto
+++ /dev/null
@@ -1,34 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers used for encryption
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "EncryptionProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

message WrappedKey {
  required string algorithm = 1;
  required uint32 length = 2;
  required bytes data = 3;
  optional bytes iv = 4;
  optional bytes hash = 5;
}
--- a/hbase-protocol/src/main/protobuf/ErrorHandling.proto
+++ /dev/null
@@ -1,59 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are used for error handling
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "ErrorHandlingProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

/**
 * Protobuf version of a java.lang.StackTraceElement
 * so we can serialize exceptions.
 */
message StackTraceElementMessage {
  optional string declaring_class = 1;
  optional string method_name = 2;
  optional string file_name = 3;
  optional int32 line_number = 4;
}

/**
 * Cause of a remote failure for a generic exception. Contains
 * all the information for a generic exception as well as
 * optional info about the error for generic info passing
 * (which should be another protobuffed class).
 */
message GenericExceptionMessage {
  optional string class_name = 1;
  optional string message = 2;
  optional bytes error_info = 3;
  repeated StackTraceElementMessage trace = 4;
}

/**
 * Exception sent across the wire when a remote task needs
 * to notify other tasks that it failed and why
 */
message ForeignExceptionMessage {
  optional string source = 1;
  optional GenericExceptionMessage generic_exception = 2;
}
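Editor's note: a minimal sketch of mapping a java.lang.Throwable onto the wire format above, one StackTraceElementMessage per frame, assuming the generated ErrorHandlingProtos class:

import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos;

public class ForeignExceptionSketch {
  // Capture an arbitrary Throwable in the message defined above.
  static ErrorHandlingProtos.GenericExceptionMessage toProto(Throwable t) {
    ErrorHandlingProtos.GenericExceptionMessage.Builder builder =
        ErrorHandlingProtos.GenericExceptionMessage.newBuilder()
            .setClassName(t.getClass().getName());
    if (t.getMessage() != null) {
      builder.setMessage(t.getMessage());
    }
    for (StackTraceElement frame : t.getStackTrace()) {
      builder.addTrace(ErrorHandlingProtos.StackTraceElementMessage.newBuilder()
          .setDeclaringClass(frame.getClassName())
          .setMethodName(frame.getMethodName())
          // file_name can be null for synthetic frames; guard before setting.
          .setFileName(frame.getFileName() == null ? "" : frame.getFileName())
          .setLineNumber(frame.getLineNumber()));
    }
    return builder.build();
  }
}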
--- a/hbase-protocol/src/main/protobuf/FS.proto
+++ /dev/null
@@ -1,46 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are written into the filesystem
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "FSProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

/**
 * The ${HBASE_ROOTDIR}/hbase.version file content
 */
message HBaseVersionFileContent {
  required string version = 1;
}

/**
 * Reference file content used when we split an hfile under a region.
 */
message Reference {
  required bytes splitkey = 1;
  enum Range {
    TOP = 0;
    BOTTOM = 1;
  }
  required Range range = 2;
}
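Editor's note: a sketch of building the Reference message above, assuming the generated FSProtos class; the split key is a placeholder, and the actual on-disk reference file wraps serialized bytes like these rather than being exactly this array:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.FSProtos;

public class ReferenceSketch {
  public static void main(String[] args) {
    // A TOP reference points at the half of the split hfile above the split key.
    FSProtos.Reference ref = FSProtos.Reference.newBuilder()
        .setSplitkey(ByteString.copyFromUtf8("row-5000"))
        .setRange(FSProtos.Reference.Range.TOP)
        .build();
    byte[] serialized = ref.toByteArray();
    System.out.println(serialized.length);
  }
}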
--- a/hbase-protocol/src/main/protobuf/Filter.proto
+++ /dev/null
@@ -1,179 +0,0 @@
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are used for filters
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "FilterProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";
import "Comparator.proto";

message Filter {
  required string name = 1;
  optional bytes serialized_filter = 2;
}

message ColumnCountGetFilter {
  required int32 limit = 1;
}

message ColumnPaginationFilter {
  required int32 limit = 1;
  optional int32 offset = 2;
  optional bytes column_offset = 3;
}

message ColumnPrefixFilter {
  required bytes prefix = 1;
}

message ColumnRangeFilter {
  optional bytes min_column = 1;
  optional bool min_column_inclusive = 2;
  optional bytes max_column = 3;
  optional bool max_column_inclusive = 4;
}

message CompareFilter {
  required CompareType compare_op = 1;
  optional Comparator comparator = 2;
}

message DependentColumnFilter {
  required CompareFilter compare_filter = 1;
  optional bytes column_family = 2;
  optional bytes column_qualifier = 3;
  optional bool drop_dependent_column = 4;
}

message FamilyFilter {
  required CompareFilter compare_filter = 1;
}

message FilterList {
  required Operator operator = 1;
  repeated Filter filters = 2;

  enum Operator {
    MUST_PASS_ALL = 1;
    MUST_PASS_ONE = 2;
  }
}

message FilterWrapper {
  required Filter filter = 1;
}

message FirstKeyOnlyFilter {
}

message FirstKeyValueMatchingQualifiersFilter {
  repeated bytes qualifiers = 1;
}

message FuzzyRowFilter {
  repeated BytesBytesPair fuzzy_keys_data = 1;
}

message InclusiveStopFilter {
  optional bytes stop_row_key = 1;
}

message KeyOnlyFilter {
  required bool len_as_val = 1;
}

message MultipleColumnPrefixFilter {
  repeated bytes sorted_prefixes = 1;
}

message PageFilter {
  required int64 page_size = 1;
}

message PrefixFilter {
  optional bytes prefix = 1;
}

message QualifierFilter {
  required CompareFilter compare_filter = 1;
}

message RandomRowFilter {
  required float chance = 1;
}

message RowFilter {
  required CompareFilter compare_filter = 1;
}

message SingleColumnValueExcludeFilter {
  required SingleColumnValueFilter single_column_value_filter = 1;
}

message SingleColumnValueFilter {
  optional bytes column_family = 1;
  optional bytes column_qualifier = 2;
  required CompareType compare_op = 3;
  required Comparator comparator = 4;
  optional bool filter_if_missing = 5;
  optional bool latest_version_only = 6;
}

message SkipFilter {
  required Filter filter = 1;
}

message TimestampsFilter {
  repeated int64 timestamps = 1 [packed=true];
  optional bool can_hint = 2;
}

message ValueFilter {
  required CompareFilter compare_filter = 1;
}

message WhileMatchFilter {
  required Filter filter = 1;
}
message FilterAllFilter {
}

message RowRange {
  optional bytes start_row = 1;
  optional bool start_row_inclusive = 2;
  optional bytes stop_row = 3;
  optional bool stop_row_inclusive = 4;
}

message MultiRowRangeFilter {
  repeated RowRange row_range_list = 1;
}

message ColumnValueFilter {
  required bytes family = 1;
  required bytes qualifier = 2;
  required CompareType compare_op = 3;
  required Comparator comparator = 4;
}
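The generic `Filter` message at the top of this file is the envelope every concrete filter travels in over the wire: `name` carries the filter's class name and `serialized_filter` carries that filter's own serialized protobuf. A minimal sketch of building the envelope by hand, assuming the pre-removal `com.google.protobuf`-based generated `FilterProtos` classes (inside HBase the `ProtobufUtil.toFilter` helpers did this conversion):

```java
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;

public class FilterEnvelopeSketch {
  public static void main(String[] args) throws Exception {
    // A concrete filter; Filter#toByteArray() emits the filter's own pb
    // (the PageFilter message defined above).
    PageFilter page = new PageFilter(10);

    // Wrap it in the generic envelope: class name plus serialized form.
    FilterProtos.Filter envelope = FilterProtos.Filter.newBuilder()
        .setName(page.getClass().getName())
        .setSerializedFilter(ByteString.copyFrom(page.toByteArray()))
        .build();

    System.out.println(envelope.getName());
  }
}
```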
@@ -1,254 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers that are shared throughout HBase
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "HBaseProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;


/**
 * Table Name
 */
message TableName {
  required bytes namespace = 1;
  required bytes qualifier = 2;
}

/**
 * Table Schema
 * Inspired by the rest TableSchema
 */
message TableSchema {
  optional TableName table_name = 1;
  repeated BytesBytesPair attributes = 2;
  repeated ColumnFamilySchema column_families = 3;
  repeated NameStringPair configuration = 4;
}

/** Denotes state of the table */
message TableState {
  // Table's current state
  enum State {
    ENABLED = 0;
    DISABLED = 1;
    DISABLING = 2;
    ENABLING = 3;
  }
  // This is the table's state.
  required State state = 1;
}

/**
 * Column Family Schema
 * Inspired by the rest ColumnSchemaMessage
 */
message ColumnFamilySchema {
  required bytes name = 1;
  repeated BytesBytesPair attributes = 2;
  repeated NameStringPair configuration = 3;
}

/**
 * Protocol buffer version of HRegionInfo.
 */
message RegionInfo {
  required uint64 region_id = 1;
  required TableName table_name = 2;
  optional bytes start_key = 3;
  optional bytes end_key = 4;
  optional bool offline = 5;
  optional bool split = 6;
  optional int32 replica_id = 7 [default = 0];
}

/**
 * Protocol buffer for favored nodes
 */
message FavoredNodes {
  repeated ServerName favored_node = 1;
}

/**
 * Container protocol buffer to specify a region.
 * You can specify region by region name, or the hash
 * of the region name, which is known as encoded
 * region name.
 */
message RegionSpecifier {
  required RegionSpecifierType type = 1;
  required bytes value = 2;

  enum RegionSpecifierType {
    // <tablename>,<startkey>,<regionId>.<encodedName>
    REGION_NAME = 1;

    // hash of <tablename>,<startkey>,<regionId>
    ENCODED_REGION_NAME = 2;
  }
}

/**
 * A range of time. Both from and to are Java time
 * stamp in milliseconds. If you don't specify a time
 * range, it means all time. By default, if not
 * specified, from = 0, and to = Long.MAX_VALUE
 */
message TimeRange {
  optional uint64 from = 1;
  optional uint64 to = 2;
}

/* ColumnFamily Specific TimeRange */
message ColumnFamilyTimeRange {
  required bytes column_family = 1;
  required TimeRange time_range = 2;
}

/* Comparison operators */
enum CompareType {
  LESS = 0;
  LESS_OR_EQUAL = 1;
  EQUAL = 2;
  NOT_EQUAL = 3;
  GREATER_OR_EQUAL = 4;
  GREATER = 5;
  NO_OP = 6;
}

/**
 * Protocol buffer version of ServerName
 */
message ServerName {
  required string host_name = 1;
  optional uint32 port = 2;
  optional uint64 start_code = 3;
}

// Comment data structures

message Coprocessor {
  required string name = 1;
}

message NameStringPair {
  required string name = 1;
  required string value = 2;
}

message NameBytesPair {
  required string name = 1;
  optional bytes value = 2;
}

message BytesBytesPair {
  required bytes first = 1;
  required bytes second = 2;
}

message NameInt64Pair {
  optional string name = 1;
  optional int64 value = 2;
}

/**
 * Description of the snapshot to take
 */
message SnapshotDescription {
  required string name = 1;
  optional string table = 2; // not needed for delete, but checked for in taking snapshot
  optional int64 creation_time = 3 [default = 0];
  enum Type {
    DISABLED = 0;
    FLUSH = 1;
    SKIPFLUSH = 2;
  }
  optional Type type = 4 [default = FLUSH];
  optional int32 version = 5;
  optional string owner = 6;
  optional int64 ttl = 7 [default = 0];
}

/**
 * Description of the distributed procedure to take
 */
message ProcedureDescription {
  required string signature = 1; // the unique signature of the procedure
  optional string instance = 2; // the procedure instance name
  optional int64 creation_time = 3 [default = 0];
  repeated NameStringPair configuration = 4;
}

message EmptyMsg {
}

enum TimeUnit {
  NANOSECONDS = 1;
  MICROSECONDS = 2;
  MILLISECONDS = 3;
  SECONDS = 4;
  MINUTES = 5;
  HOURS = 6;
  DAYS = 7;
}

message LongMsg {
  required int64 long_msg = 1;
}

message DoubleMsg {
  required double double_msg = 1;
}

message BigDecimalMsg {
  required bytes bigdecimal_msg = 1;
}

message UUID {
  required uint64 least_sig_bits = 1;
  required uint64 most_sig_bits = 2;
}

message NamespaceDescriptor {
  required bytes name = 1;
  repeated NameStringPair configuration = 2;
}

// Rpc client version info proto. Included in ConnectionHeader on connection setup
message VersionInfo {
  required string version = 1;
  required string url = 2;
  required string revision = 3;
  required string user = 4;
  required string date = 5;
  required string src_checksum = 6;
  optional uint32 version_major = 7;
  optional uint32 version_minor = 8;
}

/**
 * Description of the region server info
 */
message RegionServerInfo {
  optional int32 infoPort = 1;
  optional VersionInfo version_info = 2;
}
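The `RegionSpecifier` comment above spells out the two ways a client may name a region. For illustration only, a sketch that builds a specifier from an encoded region name, assuming the old generated `HBaseProtos` classes (the value shown is the well-known encoded name of `hbase:meta`):

```java
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class RegionSpecifierSketch {
  public static void main(String[] args) {
    // Name a region by the hash of <tablename>,<startkey>,<regionId>,
    // i.e. the "encoded" region name.
    HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFromUtf8("1588230740"))
        .build();
    System.out.println(spec.getType());
  }
}
```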
@@ -1,50 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "HFileProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";

// Map of name/values
message FileInfoProto {
  repeated BytesBytesPair map_entry = 1;
}

// HFile file trailer
message FileTrailerProto {
  optional uint64 file_info_offset = 1;
  optional uint64 load_on_open_data_offset = 2;
  optional uint64 uncompressed_data_index_size = 3;
  optional uint64 total_uncompressed_bytes = 4;
  optional uint32 data_index_count = 5;
  optional uint32 meta_index_count = 6;
  optional uint64 entry_count = 7;
  optional uint32 num_data_index_levels = 8;
  optional uint64 first_data_block_offset = 9;
  optional uint64 last_data_block_offset = 10;
  optional string comparator_class_name = 11;
  optional uint32 compression_codec = 12;
  optional bytes encryption_key = 13;
}
@@ -1,30 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file contains protocol buffers to represent the state of the load balancer.
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "LoadBalancerProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

message LoadBalancerState {
  optional bool balancer_on = 1;
}
@@ -1,38 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// This file includes protocol buffers used in MapReduce only.
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "MapReduceProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";

message ScanMetrics {
  repeated NameInt64Pair metrics = 1;
}

message TableSnapshotRegionSplit {
  repeated string locations = 2;
  optional TableSchema table = 3;
  optional RegionInfo region = 4;
}
@@ -1,48 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

import "Client.proto";
import "HBase.proto";
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "MultiRowMutationProtos";
option java_generate_equals_and_hash = true;
option java_generic_services = true;
option optimize_for = SPEED;

message MultiRowMutationProcessorRequest {
}

message MultiRowMutationProcessorResponse {
}

message MutateRowsRequest {
  repeated MutationProto mutation_request = 1;
  optional uint64 nonce_group = 2;
  optional uint64 nonce = 3;
  optional RegionSpecifier region = 4;
}

message MutateRowsResponse {
}

service MultiRowMutationService {
  rpc MutateRows(MutateRowsRequest)
      returns(MutateRowsResponse);
}
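`MultiRowMutationService` is the coprocessor endpoint behind atomic multi-row mutations within a single region. A sketch of the classic blocking invocation, assuming the pre-removal generated classes and the non-shaded `ProtobufUtil` helper; the call only succeeds when all rows live in the same region:

```java
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;

public class MultiRowMutationSketch {
  // Atomically apply two Puts, provided both rows hash to the same region.
  static void mutateRows(Table table, Put p1, Put p2) throws Exception {
    MultiRowMutationProtos.MutateRowsRequest request =
        MultiRowMutationProtos.MutateRowsRequest.newBuilder()
            .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT, p1))
            .addMutationRequest(ProtobufUtil.toMutation(MutationProto.MutationType.PUT, p2))
            .build();
    // The channel is routed to the region holding this row.
    CoprocessorRpcChannel channel = table.coprocessorService(p1.getRow());
    MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
        MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
    service.mutateRows(null, request);
  }
}
```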
@@ -1,68 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// Coprocessor test
option java_package = "org.apache.hadoop.hbase.coprocessor.protobuf.generated";
option java_outer_classname = "PingProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;

message PingRequest {
}

message PingResponse {
  required string pong = 1;
}

message CountRequest {
}

message CountResponse {
  required int32 count = 1;
}

message IncrementCountRequest {
  required int32 diff = 1;
}

message IncrementCountResponse {
  required int32 count = 1;
}

message HelloRequest {
  optional string name = 1;
}

message HelloResponse {
  optional string response = 1;
}

message NoopRequest {
}

message NoopResponse {
}

service PingService {
  rpc ping(PingRequest) returns(PingResponse);
  rpc count(CountRequest) returns(CountResponse);
  rpc increment(IncrementCountRequest) returns(IncrementCountResponse);
  rpc hello(HelloRequest) returns(HelloResponse);
  rpc noop(NoopRequest) returns(NoopResponse);
}
@@ -1,113 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "QuotaProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";

enum QuotaScope {
  CLUSTER = 1;
  MACHINE = 2;
}

message TimedQuota {
  required TimeUnit time_unit = 1;
  optional uint64 soft_limit = 2;
  optional float share = 3;
  optional QuotaScope scope = 4 [default = MACHINE];
}

enum ThrottleType {
  REQUEST_NUMBER = 1;
  REQUEST_SIZE = 2;
  WRITE_NUMBER = 3;
  WRITE_SIZE = 4;
  READ_NUMBER = 5;
  READ_SIZE = 6;
}

message Throttle {
  optional TimedQuota req_num = 1;
  optional TimedQuota req_size = 2;

  optional TimedQuota write_num = 3;
  optional TimedQuota write_size = 4;

  optional TimedQuota read_num = 5;
  optional TimedQuota read_size = 6;
}

message ThrottleRequest {
  optional ThrottleType type = 1;
  optional TimedQuota timed_quota = 2;
}

enum QuotaType {
  THROTTLE = 1;
  SPACE = 2;
}

message Quotas {
  optional bool bypass_globals = 1 [default = false];
  optional Throttle throttle = 2;
  optional SpaceQuota space = 3;
}

message QuotaUsage {
}

// Defines what action should be taken when the SpaceQuota is violated
enum SpaceViolationPolicy {
  DISABLE = 1; // Disable the table(s)
  NO_WRITES_COMPACTIONS = 2; // No writes, bulk-loads, or compactions
  NO_WRITES = 3; // No writes or bulk-loads
  NO_INSERTS = 4; // No puts or bulk-loads, but deletes are allowed
}

// Defines a limit on the amount of filesystem space used by a table/namespace
message SpaceQuota {
  optional uint64 soft_limit = 1; // The limit of bytes for this quota
  optional SpaceViolationPolicy violation_policy = 2; // The action to take when the quota is violated
  optional bool remove = 3 [default = false]; // When true, remove the quota.
}

// The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
message SpaceLimitRequest {
  optional SpaceQuota quota = 1;
}

// Represents the state of a quota on a table. Either the quota is not in violation,
// or it is in violation and there is a violation policy which should be in effect.
message SpaceQuotaStatus {
  optional SpaceViolationPolicy violation_policy = 1;
  optional bool in_violation = 2;
}

// Message stored in the value of hbase:quota table to denote the status of a table WRT
// the quota applicable to it.
message SpaceQuotaSnapshot {
  optional SpaceQuotaStatus quota_status = 1;
  optional uint64 quota_usage = 2;
  optional uint64 quota_limit = 3;
}
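`SpaceQuota` pairs a byte limit with the policy to enforce once that limit is crossed, and `SpaceLimitRequest` is its carrier when setting a quota. A sketch of composing one with the generated builders (standard protobuf builder API; the class names assume the old `QuotaProtos` package above):

```java
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos;

public class SpaceQuotaSketch {
  public static void main(String[] args) {
    // A 10 GB soft limit; once exceeded, reject puts and bulk-loads
    // but still allow deletes (NO_INSERTS, per the enum above).
    QuotaProtos.SpaceQuota quota = QuotaProtos.SpaceQuota.newBuilder()
        .setSoftLimit(10L * 1024 * 1024 * 1024)
        .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_INSERTS)
        .build();
    QuotaProtos.SpaceLimitRequest request = QuotaProtos.SpaceLimitRequest.newBuilder()
        .setQuota(quota)
        .build();
    System.out.println(request.getQuota().getSoftLimit());
  }
}
```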
@@ -1,138 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

import "Tracing.proto";
import "HBase.proto";

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "RPCProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

// See https://issues.apache.org/jira/browse/HBASE-7898 for high-level
// description of RPC specification.
//
// On connection setup, the client sends six bytes of preamble -- a four
// byte magic, a byte of version, and a byte of authentication type.
//
// We then send a "ConnectionHeader" protobuf of user information and the
// 'protocol' or 'service' that is to be run over this connection as well as
// info such as codecs and compression to use when we send cell blocks (see below).
// This connection header protobuf is prefaced by an int that holds the length
// of this connection header (this is NOT a varint). The pb connection header
// is sent with Message#writeTo. The server throws an exception if it doesn't
// like what it was sent, noting what it is objecting to. Otherwise, the server
// says nothing and is open for business.
//
// Hereafter the client makes requests and the server returns responses.
//
// Requests look like this:
//
// <An int with the total length of the request>
// <RequestHeader Message written out using Message#writeDelimitedTo>
// <Optionally a Request Parameter Message written out using Message#writeDelimitedTo>
// <Optionally a Cell block>
//
// ...where the Request Parameter Message is whatever the method name stipulated
// in the RequestHeader expects; e.g. if the method is a scan, then the pb
// Request Message is a GetRequest, or a ScanRequest. A block of Cells
// optionally follows. The presence of a Request param Message and/or a
// block of Cells will be noted in the RequestHeader.
//
// Response is the mirror of the request:
//
// <An int with the total length of the response>
// <ResponseHeader Message written out using Message#writeDelimitedTo>
// <Optionally a Response Result Message written out using Message#writeDelimitedTo>
// <Optionally a Cell block>
//
// ...where the Response Message is the response type that goes with the
// method specified when making the request and the follow on Cell blocks may
// or may not be there -- read the response header to find out if one is following.
// If an exception, it will be included inside the Response Header.
//
// Any time we write a pb, we do it with Message#writeDelimitedTo EXCEPT when
// the connection header is sent; this is prefaced by an int with its length
// and the pb connection header is then written with Message#writeTo.
//

// User Information proto. Included in ConnectionHeader on connection setup
message UserInformation {
  required string effective_user = 1;
  optional string real_user = 2;
}

// This is sent on connection setup after the connection preamble is sent.
message ConnectionHeader {
  optional UserInformation user_info = 1;
  optional string service_name = 2;
  // Cell block codec we will use sending over optional cell blocks. Server throws exception
  // if cannot deal. Null means no codec'ing going on so we are pb all the time (SLOW!!!)
  optional string cell_block_codec_class = 3;
  // Compressor we will use if cell block is compressed. Server will throw exception if not supported.
  // Class must implement hadoop's CompressionCodec Interface. Can't compress if no codec.
  optional string cell_block_compressor_class = 4;
  optional VersionInfo version_info = 5;
}

// Optional Cell block Message. Included in client RequestHeader
message CellBlockMeta {
  // Length of the following cell block. Could calculate it but convenient having it to hand.
  optional uint32 length = 1;
}

// At the RPC layer, this message is used to carry
// the server side exception to the RPC client.
message ExceptionResponse {
  // Class name of the exception thrown from the server
  optional string exception_class_name = 1;
  // Exception stack trace from the server side
  optional string stack_trace = 2;
  // Optional hostname. Filled in for some exceptions such as region moved
  // where exception gives clue on where the region may have moved.
  optional string hostname = 3;
  optional int32 port = 4;
  // Set if we are NOT to retry on receipt of this exception
  optional bool do_not_retry = 5;
}

// Header sent making a request.
message RequestHeader {
  // Monotonically increasing call_id to keep track of RPC requests and their response
  optional uint32 call_id = 1;
  optional RPCTInfo trace_info = 2;
  optional string method_name = 3;
  // If true, then a pb Message param follows.
  optional bool request_param = 4;
  // If present, then an encoded data block follows.
  optional CellBlockMeta cell_block_meta = 5;
  // 0 is NORMAL priority. 200 is HIGH. If no priority, treat it as NORMAL.
  // See HConstants.
  optional uint32 priority = 6;
  optional uint32 timeout = 7;
}

message ResponseHeader {
  optional uint32 call_id = 1;
  // If present, then request threw an exception and no response message (else we presume one)
  optional ExceptionResponse exception = 2;
  // If present, then an encoded data block follows.
  optional CellBlockMeta cell_block_meta = 3;
}
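The framing described in the comment block above mixes two conventions -- a plain four-byte int length for the connection header, varint-delimited writes for everything after -- so here is a client-side sketch of connection setup and a request write. The magic bytes, version, and auth code are assumptions (HBase uses the ASCII magic "HBas", version 0, and a one-byte auth code in current releases); the delimited-vs-plain-int distinction is the point:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import com.google.protobuf.Message;

public class RpcFramingSketch {
  /**
   * Connection setup: a six byte preamble (four byte magic, a version byte,
   * an auth byte), then the ConnectionHeader pb prefaced by a plain four
   * byte int length (NOT a varint) and written with Message#writeTo.
   */
  static void writeConnectionSetup(OutputStream raw, Message connectionHeader)
      throws IOException {
    DataOutputStream out = new DataOutputStream(raw);
    out.write(new byte[] { 'H', 'B', 'a', 's' }); // assumed magic
    out.write(0);                                 // assumed rpc version
    out.write(80);                                // assumed auth code (SIMPLE)
    out.writeInt(connectionHeader.getSerializedSize());
    connectionHeader.writeTo(out);
    out.flush();
  }

  /**
   * A request: an int with the total length, then the RequestHeader and
   * (optionally) the request parameter, each written varint-delimited
   * with Message#writeDelimitedTo.
   */
  static void writeRequest(OutputStream raw, Message requestHeader, Message param)
      throws IOException {
    ByteArrayOutputStream body = new ByteArrayOutputStream();
    requestHeader.writeDelimitedTo(body);
    param.writeDelimitedTo(body);

    DataOutputStream out = new DataOutputStream(raw);
    out.writeInt(body.size()); // total length of what follows
    body.writeTo(out);
    out.flush();
  }
}
```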
@@ -1,34 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "RSGroupProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";

message RSGroupInfo {
  required string name = 1;
  repeated ServerName servers = 4;
  repeated TableName tables = 3;
}
@@ -1,158 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "RSGroupAdminProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";
import "RSGroup.proto";

/** Group level protobufs */

message ListTablesOfRSGroupRequest {
  required string r_s_group_name = 1;
}

message ListTablesOfRSGroupResponse {
  repeated TableName table_name = 1;
}

message GetRSGroupInfoRequest {
  required string r_s_group_name = 1;
}

message GetRSGroupInfoResponse {
  optional RSGroupInfo r_s_group_info = 1;
}

message GetRSGroupInfoOfTableRequest {
  required TableName table_name = 1;
}

message GetRSGroupInfoOfTableResponse {
  optional RSGroupInfo r_s_group_info = 1;
}

message MoveServersRequest {
  required string target_group = 1;
  repeated ServerName servers = 3;
}

message MoveServersResponse {
}

message MoveTablesRequest {
  required string target_group = 1;
  repeated TableName table_name = 2;
}

message MoveTablesResponse {
}

message AddRSGroupRequest {
  required string r_s_group_name = 1;
}

message AddRSGroupResponse {
}

message RemoveRSGroupRequest {
  required string r_s_group_name = 1;
}

message RemoveRSGroupResponse {
}

message BalanceRSGroupRequest {
  required string r_s_group_name = 1;
}

message BalanceRSGroupResponse {
  required bool balanceRan = 1;
}

message ListRSGroupInfosRequest {
}

message ListRSGroupInfosResponse {
  repeated RSGroupInfo r_s_group_info = 1;
}

message GetRSGroupInfoOfServerRequest {
  required ServerName server = 2;
}

message GetRSGroupInfoOfServerResponse {
  optional RSGroupInfo r_s_group_info = 1;
}

message MoveServersAndTablesRequest {
  required string target_group = 1;
  repeated ServerName servers = 2;
  repeated TableName table_name = 3;
}

message MoveServersAndTablesResponse {
}

message RemoveServersRequest {
  repeated ServerName servers = 1;
}

message RemoveServersResponse {
}

service RSGroupAdminService {
  rpc GetRSGroupInfo(GetRSGroupInfoRequest)
    returns (GetRSGroupInfoResponse);

  rpc GetRSGroupInfoOfTable(GetRSGroupInfoOfTableRequest)
    returns (GetRSGroupInfoOfTableResponse);

  rpc GetRSGroupInfoOfServer(GetRSGroupInfoOfServerRequest)
    returns (GetRSGroupInfoOfServerResponse);

  rpc MoveServers(MoveServersRequest)
    returns (MoveServersResponse);

  rpc MoveTables(MoveTablesRequest)
    returns (MoveTablesResponse);

  rpc AddRSGroup(AddRSGroupRequest)
    returns (AddRSGroupResponse);

  rpc RemoveRSGroup(RemoveRSGroupRequest)
    returns (RemoveRSGroupResponse);

  rpc BalanceRSGroup(BalanceRSGroupRequest)
    returns (BalanceRSGroupResponse);

  rpc ListRSGroupInfos(ListRSGroupInfosRequest)
    returns (ListRSGroupInfosResponse);

  rpc MoveServersAndTables(MoveServersAndTablesRequest)
    returns (MoveServersAndTablesResponse);

  rpc RemoveServers(RemoveServersRequest)
    returns (RemoveServersResponse);
}
@@ -1,47 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

/**
 * Defines a protocol to perform multi row transactions.
 * See BaseRowProcessorEndpoint for the implementation.
 * See HRegion#processRowsWithLocks() for details.
 */
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "RowProcessorProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

message ProcessRequest {
  required string row_processor_class_name = 1;
  optional string row_processor_initializer_message_name = 2;
  optional bytes row_processor_initializer_message = 3;
  optional uint64 nonce_group = 4;
  optional uint64 nonce = 5;
}

message ProcessResponse {
  required bytes row_processor_result = 1;
}

service RowProcessorService {
  rpc Process(ProcessRequest) returns (ProcessResponse);
}
@@ -1,67 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "SnapshotProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "FS.proto";
import "HBase.proto";

message SnapshotFileInfo {
  enum Type {
    HFILE = 1;
    WAL = 2;
  }

  required Type type = 1;

  optional string hfile = 3;

  optional string wal_server = 4;
  optional string wal_name = 5;
}

message SnapshotRegionManifest {
  optional int32 version = 1;

  required RegionInfo region_info = 2;
  repeated FamilyFiles family_files = 3;

  message StoreFile {
    required string name = 1;
    optional Reference reference = 2;

    // TODO: Add checksums or other fields to verify the file
    optional uint64 file_size = 3;
  }

  message FamilyFiles {
    required bytes family_name = 1;
    repeated StoreFile store_files = 2;
  }
}

message SnapshotDataManifest {
  required TableSchema table_schema = 1;
  repeated SnapshotRegionManifest region_manifests = 2;
}
@@ -1,34 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "TracingProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

// Used to pass through the information necessary to continue
// a trace after an RPC is made. All we need is the traceid
// (so we know the overarching trace this message is a part of), and
// the id of the current span when this message was sent, so we know
// what span caused the new span we will create when this message is received.
message RPCTInfo {
  optional int64 trace_id = 1;
  optional int64 parent_id = 2;
}
@@ -1,84 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "VisibilityLabelsProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "Client.proto";

message VisibilityLabelsRequest {
  repeated VisibilityLabel visLabel = 1;
}

message VisibilityLabel {
  required bytes label = 1;
  optional uint32 ordinal = 2;
}

message VisibilityLabelsResponse {
  repeated RegionActionResult result = 1;
}

message SetAuthsRequest {
  required bytes user = 1;
  repeated bytes auth = 2;
}

message UserAuthorizations {
  required bytes user = 1;
  repeated uint32 auth = 2;
}

message MultiUserAuthorizations {
  repeated UserAuthorizations userAuths = 1;
}

message GetAuthsRequest {
  required bytes user = 1;
}

message GetAuthsResponse {
  required bytes user = 1;
  repeated bytes auth = 2;
}

message ListLabelsRequest {
  optional string regex = 1;
}

message ListLabelsResponse {
  repeated bytes label = 1;
}

service VisibilityLabelsService {
  rpc addLabels(VisibilityLabelsRequest)
    returns (VisibilityLabelsResponse);
  rpc setAuths(SetAuthsRequest)
    returns (VisibilityLabelsResponse);
  rpc clearAuths(SetAuthsRequest)
    returns (VisibilityLabelsResponse);
  rpc getAuths(GetAuthsRequest)
    returns (GetAuthsResponse);
  rpc listLabels(ListLabelsRequest)
    returns (ListLabelsResponse);
}
@@ -1,177 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "WALProtos";
option java_generic_services = false;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";

message WALHeader {
  optional bool has_compression = 1;
  optional bytes encryption_key = 2;
  optional bool has_tag_compression = 3;
  optional string writer_cls_name = 4;
  optional string cell_codec_cls_name = 5;
}

/*
 * Protocol buffer version of WALKey; see WALKey comment, not really a key but WALEdit header
 * for some KVs
 */
message WALKey {
  required bytes encoded_region_name = 1;
  required bytes table_name = 2;
  required uint64 log_sequence_number = 3;
  required uint64 write_time = 4;
  /*
  This parameter is deprecated in favor of clusters which
  contains the list of clusters that have consumed the change.
  It is retained so that the log created by earlier releases (0.94)
  can be read by the newer releases.
  */
  optional UUID cluster_id = 5 [deprecated=true];

  repeated FamilyScope scopes = 6;
  optional uint32 following_kv_count = 7;

  /*
  This field contains the list of clusters that have
  consumed the change
  */
  repeated UUID cluster_ids = 8;

  optional uint64 nonceGroup = 9;
  optional uint64 nonce = 10;
  optional uint64 orig_sequence_number = 11;
  repeated Attribute extended_attributes = 12;
  /*
  optional CustomEntryType custom_entry_type = 9;

  enum CustomEntryType {
    COMPACTION = 0;
  }
  */
}
message Attribute {
  required string key = 1;
  required bytes value = 2;
}

enum ScopeType {
  REPLICATION_SCOPE_LOCAL = 0;
  REPLICATION_SCOPE_GLOBAL = 1;
}

message FamilyScope {
  required bytes family = 1;
  required ScopeType scope_type = 2;
}

/**
 * Custom WAL entries
 */

/**
 * Special WAL entry to hold all related to a compaction.
 * Written to WAL before completing compaction. There is
 * sufficient info in the below message to complete later
 * the compaction should we fail the WAL write.
 */
message CompactionDescriptor {
  required bytes table_name = 1; // TODO: WALKey already stores these, might remove
  required bytes encoded_region_name = 2;
  required bytes family_name = 3;
  repeated string compaction_input = 4; // relative to store dir
  repeated string compaction_output = 5;
  required string store_home_dir = 6; // relative to region dir
  optional bytes region_name = 7; // full region name
}

/**
 * Special WAL entry to hold all related to a flush.
 */
message FlushDescriptor {
  enum FlushAction {
    START_FLUSH = 0;
    COMMIT_FLUSH = 1;
    ABORT_FLUSH = 2;
    CANNOT_FLUSH = 3; // marker for indicating that a flush has been requested but cannot complete
  }

  message StoreFlushDescriptor {
    required bytes family_name = 1;
    required string store_home_dir = 2; // relative to region dir
    repeated string flush_output = 3; // relative to store dir (if this is a COMMIT_FLUSH)
  }

  required FlushAction action = 1;
  required bytes table_name = 2;
  required bytes encoded_region_name = 3;
  optional uint64 flush_sequence_number = 4;
  repeated StoreFlushDescriptor store_flushes = 5;
  optional bytes region_name = 6; // full region name
}

message StoreDescriptor {
  required bytes family_name = 1;
  required string store_home_dir = 2; // relative to region dir
  repeated string store_file = 3; // relative to store dir
  optional uint64 store_file_size_bytes = 4; // size of store file
}

/**
 * Special WAL entry used for writing bulk load events to WAL
 */
message BulkLoadDescriptor {
  required TableName table_name = 1;
  required bytes encoded_region_name = 2;
  repeated StoreDescriptor stores = 3;
  required int64 bulkload_seq_num = 4;
}

/**
 * Special WAL entry to hold all related to a region event (open/close).
 */
message RegionEventDescriptor {
  enum EventType {
    REGION_OPEN = 0;
    REGION_CLOSE = 1;
  }

  required EventType event_type = 1;
  required bytes table_name = 2;
  required bytes encoded_region_name = 3;
  optional uint64 log_sequence_number = 4;
  repeated StoreDescriptor stores = 5;
  optional ServerName server = 6; // Server who opened the region
  optional bytes region_name = 7; // full region name
}

/**
 * A trailer that is appended to the end of a properly closed WAL file.
 * If missing, this is either a legacy or a corrupted WAL file.
 * N.B. This trailer currently doesn't contain any information and we
 * purposefully don't expose it in the WAL APIs. It's for future growth.
 */
message WALTrailer {
}
@@ -1,147 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

// ZNode data in hbase are serialized protobufs with a four byte
// 'magic' 'PBUF' prefix.
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "ZooKeeperProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "HBase.proto";
import "ClusterStatus.proto";

/**
 * Content of the meta-region-server znode.
 */
message MetaRegionServer {
  // The ServerName hosting the meta region currently, or destination server,
  // if meta region is in transition.
  required ServerName server = 1;
  // The major version of the rpc the server speaks. This is used so that
  // clients connecting to the cluster can have prior knowledge of what version
  // to send to a RegionServer. AsyncHBase will use this to detect versions.
  optional uint32 rpc_version = 2;

  // State of the region transition. OPEN means fully operational 'hbase:meta'
  optional RegionState.State state = 3;
}

/**
 * Content of the master znode.
 */
message Master {
  // The ServerName of the current Master
  required ServerName master = 1;
  // Major RPC version so that clients can know what version the master can accept.
  optional uint32 rpc_version = 2;
  optional uint32 info_port = 3;
}

/**
 * Content of the '/hbase/running', cluster state, znode.
 */
message ClusterUp {
  // If this znode is present, cluster is up. Currently
  // the data is cluster start_date.
  required string start_date = 1;
}

/**
 * WAL SplitLog directory znodes have this for content. Used during distributed
 * WAL splitting. Holds current state and name of server that originated split.
 */
message SplitLogTask {
  enum State {
    UNASSIGNED = 0;
    OWNED = 1;
    RESIGNED = 2;
    DONE = 3;
    ERR = 4;
  }
  required State state = 1;
  required ServerName server_name = 2;
  // optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
}

/**
 * The znode that holds state of table.
 * Deprecated, table state is stored in table descriptor on HDFS.
 */
message DeprecatedTableState {
  // Table's current state
  enum State {
    ENABLED = 0;
    DISABLED = 1;
    DISABLING = 2;
    ENABLING = 3;
  }
  // This is the table's state. If no znode for a table,
  // its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
  // for more.
  required State state = 1 [default = ENABLED];
}

message TableCF {
  optional TableName table_name = 1;
  repeated bytes families = 2;
}

/**
 * Used by replication. Holds a replication peer key.
 */
message ReplicationPeer {
  // clusterkey is the concatenation of the slave cluster's
  // hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
  required string clusterkey = 1;
  optional string replicationEndpointImpl = 2;
  repeated BytesBytesPair data = 3;
  repeated NameStringPair configuration = 4;
  repeated TableCF table_cfs = 5;
  repeated bytes namespaces = 6;
  optional int64 bandwidth = 7;
}

/**
 * Used by replication. Holds whether enabled or disabled
 */
message ReplicationState {
  enum State {
    ENABLED = 0;
    DISABLED = 1;
  }
  required State state = 1;
}

/**
 * Used by replication. Holds the current position in a WAL file.
 */
message ReplicationHLogPosition {
  required int64 position = 1;
}

/**
 * State of the switch.
 */
message SwitchState {
  optional bool enabled = 1;
}
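Per the header comment in this file, every znode payload carries a four-byte 'PBUF' magic before the serialized message. A sketch of reading the master znode under that convention, assuming the old generated `ZooKeeperProtos` classes (HBase's own code uses a `ProtobufUtil` helper to strip the magic):

```java
import java.util.Arrays;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class ZNodeParseSketch {
  private static final byte[] PB_MAGIC = { 'P', 'B', 'U', 'F' };

  /** Strip the magic prefix and parse the remainder as the Master message. */
  static ZooKeeperProtos.Master parseMasterZNode(byte[] znodeData) throws Exception {
    byte[] prefix = Arrays.copyOf(znodeData, PB_MAGIC.length);
    if (!Arrays.equals(prefix, PB_MAGIC)) {
      throw new IllegalArgumentException("Missing PBUF magic prefix");
    }
    return ZooKeeperProtos.Master.parseFrom(
        Arrays.copyOfRange(znodeData, PB_MAGIC.length, znodeData.length));
  }
}
```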
@@ -1,44 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

option java_package = "org.apache.hadoop.hbase.ipc.protobuf.generated";
option java_outer_classname = "TestProtos";
option java_generate_equals_and_hash = true;

message EmptyRequestProto {
}

message EmptyResponseProto {
}

message EchoRequestProto {
  required string message = 1;
}

message EchoResponseProto {
  required string message = 1;
}

message PauseRequestProto {
  required uint32 ms = 1;
}

message AddrResponseProto {
  required string addr = 1;
}
@@ -1,37 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
syntax = "proto2";

option java_package = "org.apache.hadoop.hbase.ipc.protobuf.generated";
option java_outer_classname = "TestRpcServiceProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;

import "test.proto";


/**
 * A protobuf service for use in tests
 */
service TestProtobufRpcProto {
  rpc ping(EmptyRequestProto) returns (EmptyResponseProto);
  rpc echo(EchoRequestProto) returns (EchoResponseProto);
  rpc error(EmptyRequestProto) returns (EmptyResponseProto);
  rpc pause(PauseRequestProto) returns (EmptyResponseProto);
  rpc addr(EmptyRequestProto) returns (AddrResponseProto);
}
|
|
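A hedged sketch (not in the patch) of how these two deleted test protos were exercised: with java_generic_services enabled, proto2 generates a blocking stub whose methods mirror the rpc declarations above. The channel parameter is a hypothetical BlockingRpcChannel obtained from the test RPC client setup; passing a null controller follows the style of the deleted TestProtobufUtil test below.

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto;
import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto;
import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto;

public class EchoClientSketch {
  public static String echo(BlockingRpcChannel channel, String msg) throws ServiceException {
    // generated blocking stub wraps the channel; one method per rpc above
    TestProtobufRpcProto.BlockingInterface stub = TestProtobufRpcProto.newBlockingStub(channel);
    EchoRequestProto request = EchoRequestProto.newBuilder().setMessage(msg).build();
    EchoResponseProto response = stub.echo(null, request); // null controller, as in the tests
    return response.getMessage();
  }
}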
@@ -19,28 +19,26 @@
package org.apache.hadoop.hbase.rest.model;

import com.fasterxml.jackson.annotation.JsonProperty;
import java.io.IOException;
import java.io.Serializable;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlValue;

import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

/**
 * Representation of a cell. A cell is a single value associated with a column and
 * optional qualifier, and either the timestamp when it was stored or the user-
@@ -266,13 +266,6 @@
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <!--Needed by the visibility tags and acl CPEP things
        in here in hbase-server (that should be out in hbase-endpoints
        or integrated). -->
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-protocol</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-protocol-shaded</artifactId>
@@ -50,7 +50,6 @@ org.apache.hadoop.hbase.master.DeadServer;
org.apache.hadoop.hbase.master.HMaster;
org.apache.hadoop.hbase.master.RegionState;
org.apache.hadoop.hbase.master.ServerManager;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.quotas.QuotaUtil;
org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
@@ -31,9 +31,6 @@ org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.ServerName;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
org.apache.hadoop.hbase.util.DirectMemoryUtils;
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
java.lang.management.MemoryUsage;
@@ -358,12 +358,11 @@ public final class SnapshotDescriptionUtils {
  }

  /**
   * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory
   * Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory
   * @param fs filesystem where the snapshot was taken
   * @param snapshotDir directory where the snapshot was stored
   * @return the stored snapshot description
   * @throws CorruptedSnapshotException if the
   *           snapshot cannot be read
   * @throws CorruptedSnapshotException if the snapshot cannot be read
   */
  public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir)
      throws CorruptedSnapshotException {
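A brief hedged usage sketch for the method whose javadoc changes above; the snapshot directory path is hypothetical, and SnapshotDescription is assumed to be the shaded SnapshotProtos variant that the class now links against.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

public class ReadSnapshotInfoExample {
  public static SnapshotDescription read(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    // hypothetical completed-snapshot directory under the HBase root
    Path snapshotDir = new Path("/hbase/.hbase-snapshot/my_snapshot");
    // throws CorruptedSnapshotException (an IOException) if the snapshot cannot be read
    return SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  }
}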
@@ -1,383 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.protobuf;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import com.google.protobuf.ByteString;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer;

/**
 * Class to test ProtobufUtil.
 */
@Category({ MiscTests.class, SmallTests.class })
public class TestProtobufUtil {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestProtobufUtil.class);

  @Test
  public void testException() throws IOException {
    NameBytesPair.Builder builder = NameBytesPair.newBuilder();
    final String omg = "OMG!!!";
    builder.setName("java.io.IOException");
    builder.setValue(ByteString.copyFrom(Bytes.toBytes(omg)));
    Throwable t = ProtobufUtil.toException(builder.build());
    assertEquals(omg, t.getMessage());
    builder.clear();
    builder.setName("org.apache.hadoop.ipc.RemoteException");
    builder.setValue(ByteString.copyFrom(Bytes.toBytes(omg)));
    t = ProtobufUtil.toException(builder.build());
    assertEquals(omg, t.getMessage());
  }

  /**
   * Test basic Get conversions.
   */
  @Test
  public void testGet() throws IOException {
    ClientProtos.Get.Builder getBuilder = ClientProtos.Get.newBuilder();
    getBuilder.setRow(ByteString.copyFromUtf8("row"));
    Column.Builder columnBuilder = Column.newBuilder();
    columnBuilder.setFamily(ByteString.copyFromUtf8("f1"));
    columnBuilder.addQualifier(ByteString.copyFromUtf8("c1"));
    columnBuilder.addQualifier(ByteString.copyFromUtf8("c2"));
    getBuilder.addColumn(columnBuilder.build());

    columnBuilder.clear();
    columnBuilder.setFamily(ByteString.copyFromUtf8("f2"));
    getBuilder.addColumn(columnBuilder.build());
    getBuilder.setLoadColumnFamiliesOnDemand(true);
    ClientProtos.Get proto = getBuilder.build();
    // default fields
    assertEquals(1, proto.getMaxVersions());
    assertEquals(true, proto.getCacheBlocks());

    // set the default value for equal comparison
    getBuilder = ClientProtos.Get.newBuilder(proto);
    getBuilder.setMaxVersions(1);
    getBuilder.setCacheBlocks(true);
    getBuilder.setTimeRange(ProtobufUtil.toTimeRange(TimeRange.allTime()));

    Get get = ProtobufUtil.toGet(proto);
    assertEquals(getBuilder.build(), ProtobufUtil.toGet(get));
  }

  /**
   * Test Append Mutate conversions.
   */
  @Test
  public void testAppend() throws IOException {
    long timeStamp = 111111;
    MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
    mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
    mutateBuilder.setMutateType(MutationType.APPEND);
    mutateBuilder.setTimestamp(timeStamp);
    ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
    valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
    qualifierBuilder.setValue(ByteString.copyFromUtf8("v1"));
    qualifierBuilder.setTimestamp(timeStamp);
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
    qualifierBuilder.setValue(ByteString.copyFromUtf8("v2"));
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    mutateBuilder.addColumnValue(valueBuilder.build());

    MutationProto proto = mutateBuilder.build();
    // default fields
    assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());

    // set the default value for equal comparison
    mutateBuilder = MutationProto.newBuilder(proto);
    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);

    Append append = ProtobufUtil.toAppend(proto, null);

    // append always uses the latest timestamp,
    // reset the timestamp to the original mutate
    mutateBuilder.setTimestamp(append.getTimestamp());
    mutateBuilder.setTimeRange(ProtobufUtil.toTimeRange(append.getTimeRange()));
    assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.APPEND, append));
  }

  /**
   * Test Delete Mutate conversions.
   */
  @Test
  public void testDelete() throws IOException {
    MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
    mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
    mutateBuilder.setMutateType(MutationType.DELETE);
    mutateBuilder.setTimestamp(111111);
    ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
    valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
    qualifierBuilder.setDeleteType(DeleteType.DELETE_ONE_VERSION);
    qualifierBuilder.setTimestamp(111222);
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
    qualifierBuilder.setDeleteType(DeleteType.DELETE_MULTIPLE_VERSIONS);
    qualifierBuilder.setTimestamp(111333);
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    mutateBuilder.addColumnValue(valueBuilder.build());

    MutationProto proto = mutateBuilder.build();
    // default fields
    assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());

    // set the default value for equal comparison
    mutateBuilder = MutationProto.newBuilder(proto);
    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);

    Delete delete = ProtobufUtil.toDelete(proto);

    // deletes always have empty values,
    // add empty values to the original mutate
    for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) {
      for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) {
        qualifier.setValue(ByteString.EMPTY);
      }
    }
    assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.DELETE, delete));
  }

  /**
   * Test Increment Mutate conversions.
   */
  @Test
  public void testIncrement() throws IOException {
    long timeStamp = 111111;
    MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
    mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
    mutateBuilder.setMutateType(MutationType.INCREMENT);
    ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
    valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
    qualifierBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(11L)));
    qualifierBuilder.setTimestamp(timeStamp);
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
    qualifierBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(22L)));
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    mutateBuilder.addColumnValue(valueBuilder.build());

    MutationProto proto = mutateBuilder.build();
    // default fields
    assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());

    // set the default value for equal comparison
    mutateBuilder = MutationProto.newBuilder(proto);
    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);

    Increment increment = ProtobufUtil.toIncrement(proto, null);
    mutateBuilder.setTimestamp(increment.getTimestamp());
    mutateBuilder.setTimeRange(ProtobufUtil.toTimeRange(increment.getTimeRange()));
    assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.INCREMENT, increment));
  }

  /**
   * Test Put Mutate conversions.
   */
  @Test
  public void testPut() throws IOException {
    MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
    mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
    mutateBuilder.setMutateType(MutationType.PUT);
    mutateBuilder.setTimestamp(111111);
    ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
    valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
    qualifierBuilder.setValue(ByteString.copyFromUtf8("v1"));
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
    qualifierBuilder.setValue(ByteString.copyFromUtf8("v2"));
    qualifierBuilder.setTimestamp(222222);
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    mutateBuilder.addColumnValue(valueBuilder.build());

    MutationProto proto = mutateBuilder.build();
    // default fields
    assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());

    // set the default value for equal comparison
    mutateBuilder = MutationProto.newBuilder(proto);
    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);

    Put put = ProtobufUtil.toPut(proto);

    // put values always use the default timestamp if no
    // value-level timestamp is specified,
    // add the timestamp to the original mutate
    long timestamp = put.getTimestamp();
    for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) {
      for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) {
        if (!qualifier.hasTimestamp()) {
          qualifier.setTimestamp(timestamp);
        }
      }
    }
    assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.PUT, put));
  }

  /**
   * Test basic Scan conversions.
   */
  @Test
  public void testScan() throws IOException {
    ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
    scanBuilder.setStartRow(ByteString.copyFromUtf8("row1"));
    scanBuilder.setStopRow(ByteString.copyFromUtf8("row2"));
    Column.Builder columnBuilder = Column.newBuilder();
    columnBuilder.setFamily(ByteString.copyFromUtf8("f1"));
    columnBuilder.addQualifier(ByteString.copyFromUtf8("c1"));
    columnBuilder.addQualifier(ByteString.copyFromUtf8("c2"));
    scanBuilder.addColumn(columnBuilder.build());

    columnBuilder.clear();
    columnBuilder.setFamily(ByteString.copyFromUtf8("f2"));
    scanBuilder.addColumn(columnBuilder.build());

    ClientProtos.Scan proto = scanBuilder.build();

    // Verify default values
    assertEquals(1, proto.getMaxVersions());
    assertEquals(true, proto.getCacheBlocks());

    // Verify fields survive ClientProtos.Scan -> Scan -> ClientProtos.Scan
    // conversion
    scanBuilder = ClientProtos.Scan.newBuilder(proto);
    scanBuilder.setMaxVersions(2);
    scanBuilder.setCacheBlocks(false);
    scanBuilder.setCaching(1024);
    scanBuilder.setTimeRange(ProtobufUtil.toTimeRange(TimeRange.allTime()));
    scanBuilder.setIncludeStopRow(false);
    ClientProtos.Scan expectedProto = scanBuilder.build();

    ClientProtos.Scan actualProto = ProtobufUtil.toScan(ProtobufUtil.toScan(expectedProto));
    assertEquals(expectedProto, actualProto);
  }

  @Test
  public void testToCell() throws Exception {
    KeyValue kv1 =
        new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]);
    KeyValue kv2 =
        new KeyValue(Bytes.toBytes("bbb"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]);
    KeyValue kv3 =
        new KeyValue(Bytes.toBytes("ccc"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), new byte[30]);
    byte[] arr = new byte[kv1.getLength() + kv2.getLength() + kv3.getLength()];
    System.arraycopy(kv1.getBuffer(), kv1.getOffset(), arr, 0, kv1.getLength());
    System.arraycopy(kv2.getBuffer(), kv2.getOffset(), arr, kv1.getLength(), kv2.getLength());
    System.arraycopy(kv3.getBuffer(), kv3.getOffset(), arr, kv1.getLength() + kv2.getLength(),
        kv3.getLength());
    ByteBuffer dbb = ByteBuffer.allocateDirect(arr.length);
    dbb.put(arr);
    ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength());
    CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV);
    Cell newOffheapKV =
        ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell);
    assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
  }

  @Test
  public void testMetaRegionState() throws Exception {
    ServerName serverName = ServerName.valueOf("localhost", 1234, 5678);
    // New region state style.
    for (RegionState.State state : RegionState.State.values()) {
      RegionState regionState =
          new RegionState(RegionInfoBuilder.FIRST_META_REGIONINFO, state, serverName);
      MetaRegionServer metars = MetaRegionServer.newBuilder()
          .setServer(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(serverName))
          .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
          .setState(state.convert()).build();
      // Serialize
      byte[] data = ProtobufUtil.prependPBMagic(metars.toByteArray());
      ProtobufUtil.prependPBMagic(data);
      // Deserialize
      RegionState regionStateNew =
          org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.parseMetaRegionStateFrom(data, 1);
      assertEquals(regionState.getServerName(), regionStateNew.getServerName());
      assertEquals(regionState.getState(), regionStateNew.getState());
    }
    // old style.
    RegionState rs =
        org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.parseMetaRegionStateFrom(
            serverName.getVersionedBytes(), 1);
    assertEquals(serverName, rs.getServerName());
    assertEquals(rs.getState(), RegionState.State.OPEN);
  }
}
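Since the deleted test above covered only the non-shaded ProtobufUtil, here is a minimal hedged sketch of the same Get round-trip against the shaded classes that survive this commit; it assumes the shaded ProtobufUtil mirrors the toGet conversions the deleted test exercised, which the shaded client code in this repository suggests but this patch does not state.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

public class ShadedGetRoundTrip {
  public static boolean roundTrips() throws IOException {
    ClientProtos.Get proto = ClientProtos.Get.newBuilder()
        .setRow(ByteString.copyFromUtf8("row"))
        .build();
    Get get = ProtobufUtil.toGet(proto);             // shaded proto -> client Get
    ClientProtos.Get back = ProtobufUtil.toGet(get); // client Get -> shaded proto
    return back.getRow().equals(proto.getRow());
  }
}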
@@ -17,7 +17,8 @@
 */
package org.apache.hadoop.hbase.protobuf;

import static org.junit.Assert.*;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.SecurityTests;

@@ -48,6 +47,8 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;

@Category({SecurityTests.class, MediumTests.class})
public class TestVisibilityLabelsWithSLGStack {
@@ -55,12 +55,6 @@
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-protocol</artifactId>
      <type>jar</type>
      <scope>compile</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-client</artifactId>
@@ -165,10 +165,6 @@
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-protocol</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-client</artifactId>
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.visibility.ScanLabelGenerator;

@@ -73,6 +72,8 @@ import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;

@Category({ClientTests.class, MediumTests.class})
public class TestThriftHBaseServiceHandlerWithLabels {
pom.xml
@@ -70,7 +70,6 @@
    <module>hbase-thrift</module>
    <module>hbase-shell</module>
    <module>hbase-protocol-shaded</module>
    <module>hbase-protocol</module>
    <module>hbase-client</module>
    <module>hbase-hadoop-compat</module>
    <module>hbase-common</module>

@@ -1682,11 +1681,6 @@
      <artifactId>hbase-protocol-shaded</artifactId>
      <version>${project.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-protocol</artifactId>
      <version>${project.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-procedure</artifactId>