HBASE-5443 Create PB protocols for HRegionInterface
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1307625 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
229cf740b8
commit
c7c1dbeb65
|
@ -120,8 +120,9 @@ implements Configurable {
|
|||
*/
|
||||
@Override
|
||||
public void setConf(Configuration configuration) {
|
||||
this.conf = HBaseConfiguration.create(configuration);
|
||||
this.conf = configuration; //HBaseConfiguration.create(configuration);
|
||||
try {
|
||||
HBaseConfiguration.addHbaseResources(conf);
|
||||
this.table = new HTable(this.conf,
|
||||
configuration.get(TableOutputFormat.OUTPUT_TABLE));
|
||||
} catch (IOException e) {
|
||||
|
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,30 @@
|
|||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
|
||||
<html>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
<head />
|
||||
<body bgcolor="white">
|
||||
Holds classes generated from <a href="http://code.google.com/apis/protocolbuffers/">protobuf</a>
|
||||
<code>src/main/protobuf</code> definition files.
|
||||
|
||||
<p>See under <code>src/main/protobuf</code> for instructions on how to generate the content under
|
||||
the <code>generated</code> subpackage.
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,27 @@
|
|||
These are the protobuf definition files used by hbase. The produced java
|
||||
classes are generated into src/main/java/org/apache/hadoop/hbase/protobuf/generated
|
||||
and then checked in. The reasoning is that they change infrequently.
|
||||
|
||||
To regenerate the classes after making definition file changes, ensure first that
|
||||
the protobuf protoc tool is in your $PATH (You may need to download it and build
|
||||
it first; it's part of the protobuf package obtainable from here:
|
||||
http://code.google.com/p/protobuf/downloads/list). Then run the following (You
|
||||
should be able to just copy and paste the below into a terminal and hit return
|
||||
-- the protoc compiler runs fast):
|
||||
|
||||
UNIX_PROTO_DIR=src/main/protobuf
|
||||
JAVA_DIR=src/main/java/
|
||||
mkdir -p $JAVA_DIR 2> /dev/null
|
||||
if which cygpath 2> /dev/null; then
|
||||
PROTO_DIR=`cygpath --windows $UNIX_PROTO_DIR`
|
||||
JAVA_DIR=`cygpath --windows $JAVA_DIR`
|
||||
else
|
||||
PROTO_DIR=$UNIX_PROTO_DIR
|
||||
fi
|
||||
for PROTO_FILE in $UNIX_PROTO_DIR/*.proto
|
||||
do
|
||||
protoc -I$PROTO_DIR --java_out=$JAVA_DIR $PROTO_FILE
|
||||
done
|
||||
|
||||
After you've done the above, check the generated files in (or post a patch
|
||||
on a JIRA with your definition file changes and the generated files).
|
|
@ -0,0 +1,236 @@
|
|||
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file contains protocol buffers that are used for RegionAdmin service.

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "RegionAdminProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

// Shared types: RegionSpecifier, RegionInfo, KeyValue, ServerName, etc.
import "hbase.proto";

// Asks for the HRegionInfo of the specified region.
message GetRegionInfoRequest {
  required RegionSpecifier region = 1;
}

message GetRegionInfoResponse {
  required RegionInfo regionInfo = 1;
}

/**
 * Get a list of store files for a set of column families in a particular region.
 * If no column family is specified, get the store files for all column families.
 */
message GetStoreFileListRequest {
  required RegionSpecifier region = 1;
  repeated bytes columnFamily = 2;
}

message GetStoreFileListResponse {
  // Store file paths, as strings.
  repeated string storeFile = 1;
}

// Asks the region server for all regions it currently has online.
// Intentionally empty; fields can be added later without breaking callers.
message GetOnlineRegionRequest {
}

message GetOnlineRegionResponse {
  repeated RegionInfo regionInfo = 1;
}

// Asks the region server to open one or more regions.
message OpenRegionRequest {
  repeated RegionSpecifier region = 1;
  // Expected version of the znode of the region in OFFLINE state,
  // used to fence stale openers during ZK-based assignment.
  optional uint32 versionOfOfflineNode = 2;
}

message OpenRegionResponse {
  // One opening state per region requested, in request order.
  repeated RegionOpeningState openingState = 1;

  enum RegionOpeningState {
    OPENED = 0;
    ALREADY_OPENED = 1;
    FAILED_OPENING = 2;
  }
}

/**
 * Closes the specified region and will use or not use ZK during the close
 * according to the specified flag.
 */
message CloseRegionRequest {
  required RegionSpecifier region = 1;
  // Expected version of the znode of the closing region (ZK fencing).
  optional uint32 versionOfClosingNode = 2;
  optional bool transitionInZK = 3 [default = true];
}

message CloseRegionResponse {
  required bool closed = 1;
}

/**
 * Flushes the MemStore of the specified region.
 * <p>
 * This method is synchronous.
 */
message FlushRegionRequest {
  required RegionSpecifier region = 1;
  // Only flush if the last flush is older than this timestamp.
  optional uint64 ifOlderThanTs = 2;
}

message FlushRegionResponse {
  required uint64 lastFlushTime = 1;
  optional bool flushed = 2;
}

/**
 * Splits the specified region.
 * <p>
 * This method currently flushes the region and then forces a compaction which
 * will then trigger a split.  The flush is done synchronously but the
 * compaction is asynchronous.
 */
message SplitRegionRequest {
  required RegionSpecifier region = 1;
  // Explicit split point; if absent the region picks its own midpoint.
  optional bytes splitPoint = 2;
}

message SplitRegionResponse {
}

/**
 * Compacts the specified region.  Performs a major compaction if specified.
 * <p>
 * This method is asynchronous.
 */
message CompactRegionRequest {
  required RegionSpecifier region = 1;
  optional bool major = 2;
}

message CompactRegionResponse {
}

// Protocol buffer version of java.util.UUID (two 64-bit halves).
message UUID {
  required uint64 leastSigBits = 1;
  required uint64 mostSigBits = 2;
}

// Protocol buffer version of HLog
message WALEntry {
  required WALKey walKey = 1;
  required WALEdit edit = 2;

  // Protocol buffer version of HLogKey
  message WALKey {
    required bytes encodedRegionName = 1;
    required bytes tableName = 2;
    required uint64 logSequenceNumber = 3;
    // Write time in milliseconds since the epoch.
    required uint64 writeTime = 4;
    // Cluster the entry originated from, used by replication.
    optional UUID clusterId = 5;
  }

  message WALEdit {
    repeated KeyValue keyValue = 1;
    repeated FamilyScope familyScope = 2;

    enum ScopeType {
      REPLICATION_SCOPE_LOCAL = 0;
      REPLICATION_SCOPE_GLOBAL = 1;
    }

    // Replication scope for a single column family of this edit.
    message FamilyScope {
      required bytes family = 1;
      required ScopeType scopeType = 2;
    }
  }
}

/**
 * Replicates the given entries. The guarantee is that the given entries
 * will be durable on the slave cluster if this method returns without
 * any exception.
 * hbase.replication has to be set to true for this to work.
 */
message ReplicateWALEntryRequest {
  repeated WALEntry walEntry = 1;
}

message ReplicateWALEntryResponse {
}

// Replacement for rollHLogWriter in HRegionInterface
message RollWALWriterRequest {
}

message RollWALWriterResponse {
  // A list of encoded name of regions to flush
  repeated bytes regionToFlush = 1;
}

// Asks the region server to stop, carrying a human-readable reason.
message StopServerRequest {
  required string reason = 1;
}

message StopServerResponse {
}

message GetServerInfoRequest {
}

message GetServerInfoResponse {
  required ServerName serverName = 1;
}

// Admin-side replacement for HRegionInterface: region lifecycle,
// WAL management and server control operations.
service RegionAdminService {
  rpc getRegionInfo(GetRegionInfoRequest)
    returns(GetRegionInfoResponse);

  rpc getStoreFileList(GetStoreFileListRequest)
    returns(GetStoreFileListResponse);

  rpc getOnlineRegion(GetOnlineRegionRequest)
    returns(GetOnlineRegionResponse);

  rpc openRegion(OpenRegionRequest)
    returns(OpenRegionResponse);

  rpc closeRegion(CloseRegionRequest)
    returns(CloseRegionResponse);

  rpc flushRegion(FlushRegionRequest)
    returns(FlushRegionResponse);

  rpc splitRegion(SplitRegionRequest)
    returns(SplitRegionResponse);

  rpc compactRegion(CompactRegionRequest)
    returns(CompactRegionResponse);

  rpc replicateWALEntry(ReplicateWALEntryRequest)
    returns(ReplicateWALEntryResponse);

  rpc rollWALWriter(RollWALWriterRequest)
    returns(RollWALWriterResponse);

  rpc getServerInfo(GetServerInfoRequest)
    returns(GetServerInfoResponse);

  rpc stopServer(StopServerRequest)
    returns(StopServerResponse);
}
|
|
@ -0,0 +1,372 @@
|
|||
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file contains protocol buffers that are used for RegionClient service.

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "RegionClientProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

// Shared types: RegionSpecifier, KeyValue, TimeRange, etc.
import "hbase.proto";

/**
 * Container for a list of column qualifier names of a family.
 */
message Column {
  required bytes family = 1;
  // Empty means all qualifiers of the family.
  repeated bytes qualifier = 2;
}

// A named, optionally-valued operation attribute (name/value pair).
message Attribute {
  required string name = 1;
  optional bytes value = 2;
}

/**
 * The protocol buffer version of Get
 */
message Get {
  required bytes row = 1;
  repeated Column column = 2;
  repeated Attribute attribute = 3;
  // Id of a previously taken row lock, if any.
  optional uint64 lockId = 4;
  // Serialized filter; see Parameter for the encoding convention.
  optional Parameter filter = 5;
  optional TimeRange timeRange = 6;
  optional uint32 maxVersions = 7 [default = 1];
  optional bool cacheBlocks = 8 [default = true];
}

// Protocol buffer version of a client Result: the matching cells.
message Result {
  repeated KeyValue value = 1;
}

/**
 * The get request. Perform a single Get operation.
 * Unless existenceOnly is specified, return all the requested data
 * for the row that matches exactly, or the one that immediately
 * precedes it if closestRowBefore is specified.
 *
 * If existenceOnly is set, only the existence will be returned.
 */
message GetRequest {
  required RegionSpecifier region = 1;
  required Get get = 2;

  // If the row to get doesn't exist, return the
  // closest row before.
  optional bool closestRowBefore = 3;

  // The result isn't asked for, just check for
  // the existence. If specified, closestRowBefore
  // will be ignored
  optional bool existenceOnly = 4;
}

message GetResponse {
  optional Result result = 1;

  // used for Get to check existence only
  optional bool exists = 2;
}

/**
 * Condition to check if the value of a given cell (row,
 * family, qualifier) matches a value via a given comparator.
 * The value is optional since some comparator may not require
 * a value to compare, for example, checking null.
 *
 * Condition is used in check and mutate operations.
 */
message Condition {
  required bytes row = 1;
  required bytes family = 2;
  required bytes qualifier = 3;
  required CompareType compareType = 4;
  required Comparator comparator = 5;
  optional bytes value = 6;

  enum CompareType {
    LESS = 0;
    LESS_OR_EQUAL = 1;
    EQUAL = 2;
    NOT_EQUAL = 3;
    GREATER_OR_EQUAL = 4;
    GREATER = 5;
    NO_OP = 6;
  }

  enum Comparator {
    BINARY_COMPARATOR = 0;
    BINARY_PREFIX_COMPARATOR = 1;
    BIT_AND_COMPARATOR = 2;
    BIT_OR_COMPARATOR = 3;
    BIT_XOR_COMPARATOR = 4;
    NULL_COMPARATOR = 5;
    REGEX_STRING_COMPARATOR = 6;
    SUBSTRING_COMPARATOR = 7;
  }
}

/**
 * A specific mutate inside a mutate request.
 * It can be an append, increment, put or delete based
 * on the mutate type.
 */
message Mutate {
  required bytes row = 1;
  required MutateType mutateType = 2;
  repeated ColumnValue columnValue = 3;
  repeated Attribute attribute = 4;
  // Timestamp of the mutate, in milliseconds since the epoch.
  optional uint64 timestamp = 5;
  // Id of a previously taken row lock, if any.
  optional uint64 lockId = 6;
  optional bool writeToWAL = 7 [default = true];

  // For some mutate, result may be returned, in which case,
  // time range can be specified for potential performance gain
  optional TimeRange timeRange = 10;

  enum MutateType {
    APPEND = 0;
    INCREMENT = 1;
    PUT = 2;
    DELETE = 3;
    DELETE_COLUMN = 4;
    DELETE_FAMILY = 5;
  }

  // Values for one column family of the mutate.
  message ColumnValue {
    required bytes family = 1;
    repeated QualifierValue qualifierValue = 2;

    // Default timestamp for qualifier values,
    // or timestamp of the column family to be deleted
    optional uint64 timestamp = 3;

    message QualifierValue {
      required bytes qualifier = 1;
      optional bytes value = 2;
      optional uint64 timestamp = 3;
    }
  }
}

/**
 * The mutate request. Perform a single Mutate operation.
 *
 * Optionally, you can specify a condition. The mutate
 * will take place only if the condition is met. Otherwise,
 * the mutate will be ignored. In the response result,
 * parameter processed is used to indicate if the mutate
 * actually happened.
 */
message MutateRequest {
  required RegionSpecifier region = 1;
  required Mutate mutate = 2;
  optional Condition condition = 3;
}

message MutateResponse {
  optional Result result = 1;

  // used for mutate to indicate processed only
  optional bool processed = 2;
}

/**
 * Instead of get from a table, you can scan it with optional filters.
 * You can specify the row key range, time range, the columns/families
 * to scan and so on.
 *
 * This scan is used the first time in a scan request. The response of
 * the initial scan will return a scanner id, which should be used to
 * fetch result batches later on before it is closed.
 */
message Scan {
  repeated Column column = 1;
  repeated Attribute attribute = 2;
  // Inclusive start row; absent means scan from the first row.
  optional bytes startRow = 3;
  // Exclusive stop row; absent means scan to the last row.
  optional bytes stopRow = 4;
  // Serialized filter; see Parameter for the encoding convention.
  optional Parameter filter = 5;
  optional TimeRange timeRange = 6;
  optional uint32 maxVersions = 7 [default = 1];
  optional bool cacheBlocks = 8 [default = true];
  optional uint32 rowsToCache = 9;
  optional uint32 batchSize = 10;
}

/**
 * A scan request. Initially, it should specify a scan. Later on, you
 * can use the scanner id returned to fetch result batches with a different
 * scan request.
 *
 * The scanner will remain open if there are more results, and it's not
 * asked to be closed explicitly.
 *
 * You can fetch the results and ask the scanner to be closed to save
 * a trip if you are not interested in remaining results.
 */
message ScanRequest {
  optional uint64 scannerId = 1;
  optional Scan scan = 2;
  optional uint32 numberOfRows = 3;
  optional bool closeScanner = 4;
}

/**
 * The scan response. If there are no more results, moreResults will
 * be false.  If it is not specified, it means there are more.
 */
message ScanResponse {
  repeated Result result = 1;
  optional uint64 scannerId = 2;
  optional bool moreResults = 3;
  // Scanner lease time, presumably in milliseconds — confirm with server code.
  optional uint32 ttl = 4;
}

// Takes row locks on the given rows of a region.
message LockRowRequest {
  required RegionSpecifier region = 1;
  repeated bytes row = 2;
}

message LockRowResponse {
  required uint64 lockId = 1;
  // Lock lease time, presumably in milliseconds — confirm with server code.
  optional uint32 ttl = 2;
}

// Releases a previously taken row lock.
message UnlockRowRequest {
  required RegionSpecifier region = 1;
  required uint64 lockId = 2;
}

message UnlockRowResponse {
}

/**
 * Atomically bulk load multiple HFiles (say from different column families)
 * into an open region.
 */
message BulkLoadHFileRequest {
  required RegionSpecifier region = 1;
  repeated FamilyPath familyPath = 2;

  // Pairs a column family with the filesystem path of the HFile to load.
  message FamilyPath {
    required bytes family = 1;
    required string path = 2;
  }
}

message BulkLoadHFileResponse {
  required bool loaded = 1;
}

// A typed, binary-encoded value: type is the (class) name,
// binaryValue is the serialized payload.
message Parameter {
  required string type = 1;
  optional bytes binaryValue = 2;
}

// A simple string name/value configuration property.
message Property {
  required string name = 1;
  required string value = 2;
}

/**
 * An individual coprocessor call. You must specify the protocol,
 * the method, and the row to which the call will be executed.
 *
 * You can specify the configuration settings in the property list.
 *
 * The parameter list has the parameters used for the method.
 * A parameter is a pair of parameter name and the binary parameter
 * value. The name is the parameter class name. The value is the
 * binary format of the parameter, for example, protocol buffer
 * encoded value.
 */
message Exec {
  required bytes row = 1;
  required string protocolName = 2;
  required string methodName = 3;
  repeated Property property = 4;
  repeated Parameter parameter = 5;
}

/**
 * Executes a single {@link org.apache.hadoop.hbase.ipc.CoprocessorProtocol}
 * method using the registered protocol handlers.
 * {@link CoprocessorProtocol} implementations must be registered via the
 * {@link org.apache.hadoop.hbase.regionserver.HRegion#registerProtocol(
 * Class, org.apache.hadoop.hbase.ipc.CoprocessorProtocol)}
 * method before they are available.
 */
message ExecCoprocessorRequest {
  required RegionSpecifier region = 1;
  required Exec call = 2;
}

message ExecCoprocessorResponse {
  required bytes regionName = 1;
  required Parameter value = 2;
}

/**
 * You can execute a list of actions on regions assigned
 * to the same region server, if you can't find an individual
 * call which meets your requirement.
 *
 * The multi request can have a list of requests. Each request
 * should be a protocol buffer encoded request such as GetRequest,
 * MutateRequest, ExecCoprocessorRequest.
 *
 * If the list contains multiple mutate requests only, atomic can
 * be set to make sure they can be processed atomically.
 */
message MultiRequest {
  repeated Parameter request = 1;
  optional bool atomic = 2;
}

message MultiResponse {
  // One encoded response per request, in request order.
  repeated Parameter response = 1;
}

// Client-side replacement for HRegionInterface: data access
// operations (get, mutate, scan, locks, bulk load, coprocessors).
service RegionClientService {
  rpc get(GetRequest)
    returns(GetResponse);

  rpc mutate(MutateRequest)
    returns(MutateResponse);

  rpc scan(ScanRequest)
    returns(ScanResponse);

  rpc lockRow(LockRowRequest)
    returns(LockRowResponse);

  rpc unlockRow(UnlockRowRequest)
    returns(UnlockRowResponse);

  rpc bulkLoadHFile(BulkLoadHFileRequest)
    returns(BulkLoadHFileResponse);

  rpc execCoprocessor(ExecCoprocessorRequest)
    returns(ExecCoprocessorResponse);

  rpc multi(MultiRequest)
    returns(MultiResponse);
}
|
|
@ -0,0 +1,103 @@
|
|||
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file contains protocol buffers that are shared throughout HBase

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "HBaseProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

/**
 * Protocol buffer version of HRegionInfo.
 */
message RegionInfo {
  required uint64 regionId = 1;
  required bytes tableName = 2;
  // Inclusive start key; absent for the first region of a table.
  optional bytes startKey = 3;
  // Exclusive end key; absent for the last region of a table.
  optional bytes endKey = 4;
  optional bool offline = 5;
  optional bool split = 6;
}

/**
 * Container protocol buffer to specify a region.
 * You can specify region by region name, or the hash
 * of the region name, which is known as encoded
 * region name.
 */
message RegionSpecifier {
  required RegionSpecifierType type = 1;
  // Interpretation depends on type; see RegionSpecifierType.
  required bytes value = 2;

  enum RegionSpecifierType {
    // <tablename>,<startkey>,<regionId>.<encodedName>
    REGION_NAME = 1;

    // hash of <tablename>,<startkey>,<regionId>
    ENCODED_REGION_NAME = 2;
  }
}

/**
 * A range of time. Both from and to are Java time
 * stamp in milliseconds. If you don't specify a time
 * range, it means all time. By default, if not
 * specified, from = 0, and to = Long.MAX_VALUE
 */
message TimeRange {
  // Inclusive lower bound, milliseconds since the epoch.
  optional uint64 from = 1;
  // Exclusive upper bound, milliseconds since the epoch — confirm
  // exclusivity against org.apache.hadoop.hbase.io.TimeRange.
  optional uint64 to = 2;
}

/**
 * The type of the key in a KeyValue.
 */
enum KeyType {
  MINIMUM = 0;
  PUT = 4;

  DELETE = 8;
  DELETE_COLUMN = 12;
  DELETE_FAMILY = 14;

  // MAXIMUM is used when searching; you look from maximum on down.
  MAXIMUM = 255;
}

/**
 * Protocol buffer version of KeyValue.
 * It doesn't have those transient parameters
 */
message KeyValue {
  required bytes row = 1;
  required bytes family = 2;
  required bytes qualifier = 3;
  // Milliseconds since the epoch.
  optional uint64 timestamp = 4;
  optional KeyType keyType = 5;
  optional bytes value = 6;
}

/**
 * Protocol buffer version of ServerName
 */
message ServerName {
  required string hostName = 1;
  optional uint32 port = 2;
  // Server start timestamp, used to distinguish server restarts.
  optional uint64 startCode = 3;
}
|
Loading…
Reference in New Issue