HBASE-8788 Edit of .proto files moving classes to better homes

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1495642 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-06-22 00:15:11 +00:00
parent e67e74c02e
commit 8781231132
30 changed files with 5674 additions and 5604 deletions
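From a caller's point of view the move is mechanical: the relocated messages keep their fields, only the owning .proto file and therefore the generated outer Java class change (HBaseProtos.ServerLoad/RegionLoad become ClusterStatusProtos.*, HBaseProtos.Filter becomes FilterProtos.Filter, HBaseProtos.Cell becomes CellProtos.Cell, and the RootRegionServer znode message is renamed MetaRegionServer). A minimal, hypothetical sketch of the new spellings, assuming the post-commit generated classes are on the classpath; the filter class name below is illustrative only:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;

// Illustrative caller, not part of this commit.
public class ProtoHomesSketch {
  public static void main(String[] args) {
    // Was HBaseProtos.ServerLoad before this commit.
    ClusterStatusProtos.ServerLoad emptyLoad =
        ClusterStatusProtos.ServerLoad.newBuilder().build();
    // Was HBaseProtos.Filter before this commit.
    FilterProtos.Filter filter = FilterProtos.Filter.newBuilder()
        .setName("org.apache.hadoop.hbase.filter.KeyOnlyFilter") // illustrative filter class name
        .setSerializedFilter(ByteString.EMPTY)                   // empty payload, just for the sketch
        .build();
    System.out.println(emptyLoad.getRegionLoadsList().size()
        + " region loads; filter " + filter.getName());
  }
}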

View File

@@ -21,7 +21,7 @@
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
@@ -31,9 +31,9 @@ import org.apache.hadoop.hbase.util.Strings;
@InterfaceAudience.Private
public class RegionLoad {
protected HBaseProtos.RegionLoad regionLoadPB;
protected ClusterStatusProtos.RegionLoad regionLoadPB;
public RegionLoad(HBaseProtos.RegionLoad regionLoadPB) {
public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) {
this.regionLoadPB = regionLoadPB;
}
@@ -203,4 +203,4 @@ public class RegionLoad {
compactionProgressPct);
return sb.toString();
}
}
}

View File

@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.util.Bytes;
@@ -53,9 +54,9 @@ public class ServerLoad {
private long totalCompactingKVs = 0;
private long currentCompactedKVs = 0;
public ServerLoad(HBaseProtos.ServerLoad serverLoad) {
public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
this.serverLoad = serverLoad;
for (HBaseProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
stores += rl.getStores();
storefiles += rl.getStorefiles();
storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB();
@@ -76,11 +77,11 @@ public class ServerLoad {
// NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because
// HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967).
/* @return the underlying ServerLoad protobuf object */
public HBaseProtos.ServerLoad obtainServerLoadPB() {
public ClusterStatusProtos.ServerLoad obtainServerLoadPB() {
return serverLoad;
}
protected HBaseProtos.ServerLoad serverLoad;
protected ClusterStatusProtos.ServerLoad serverLoad;
/* @return number of requests since last report. */
public int getNumberOfRequests() {
@@ -200,7 +201,7 @@ public class ServerLoad {
public Map<byte[], RegionLoad> getRegionsLoad() {
Map<byte[], RegionLoad> regionLoads =
new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
for (HBaseProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
RegionLoad regionLoad = new RegionLoad(rl);
regionLoads.put(regionLoad.getName(), regionLoad);
}
@@ -299,5 +300,5 @@ public class ServerLoad {
}
public static final ServerLoad EMPTY_SERVERLOAD =
new ServerLoad(HBaseProtos.ServerLoad.newBuilder().build());
new ServerLoad(ClusterStatusProtos.ServerLoad.newBuilder().build());
}

View File

@@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
@@ -339,8 +339,8 @@ public class ServerName implements Comparable<ServerName> {
if (ProtobufUtil.isPBMagicPrefix(data)) {
int prefixLen = ProtobufUtil.lengthOfPBMagic();
try {
RootRegionServer rss =
RootRegionServer.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build();
MetaRegionServer rss =
MetaRegionServer.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getServer();
return new ServerName(sn.getHostName(), sn.getPort(), sn.getStartCode());
} catch (InvalidProtocolBufferException e) {

View File

@@ -25,13 +25,10 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import com.google.protobuf.InvalidProtocolBufferException;
@@ -55,7 +52,6 @@ public class FilterList extends Filter {
MUST_PASS_ONE
}
private static final Configuration conf = HBaseConfiguration.create();
private static final int MAX_LOG_FILTERS = 5;
private Operator operator = Operator.MUST_PASS_ALL;
private List<Filter> filters = new ArrayList<Filter>();
@@ -306,7 +302,7 @@ public class FilterList extends Filter {
List<Filter> rowFilters = new ArrayList<Filter>(proto.getFiltersCount());
try {
for (HBaseProtos.Filter filter : proto.getFiltersList()) {
for (FilterProtos.Filter filter : proto.getFiltersList()) {
rowFilters.add(ProtobufUtil.toFilter(filter));
}
} catch (IOException ioe) {

View File

@@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
@@ -97,10 +98,11 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Del
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
@@ -383,7 +385,7 @@ public final class ProtobufUtil {
get.setTimeRange(minStamp, maxStamp);
}
if (proto.hasFilter()) {
HBaseProtos.Filter filter = proto.getFilter();
FilterProtos.Filter filter = proto.getFilter();
get.setFilter(ProtobufUtil.toFilter(filter));
}
for (NameBytesPair attribute: proto.getAttributeList()) {
@@ -822,7 +824,7 @@ public final class ProtobufUtil {
scan.setTimeRange(minStamp, maxStamp);
}
if (proto.hasFilter()) {
HBaseProtos.Filter filter = proto.getFilter();
FilterProtos.Filter filter = proto.getFilter();
scan.setFilter(ProtobufUtil.toFilter(filter));
}
if (proto.hasBatchSize()) {
@@ -1058,9 +1060,9 @@ public final class ProtobufUtil {
* @return the converted client Result
*/
public static Result toResult(final ClientProtos.Result proto) {
List<HBaseProtos.Cell> values = proto.getCellList();
List<CellProtos.Cell> values = proto.getCellList();
List<Cell> cells = new ArrayList<Cell>(values.size());
for (HBaseProtos.Cell c: values) {
for (CellProtos.Cell c: values) {
cells.add(toCell(c));
}
return new Result(cells);
@@ -1086,9 +1088,9 @@ public final class ProtobufUtil {
cells.add(scanner.current());
}
}
List<HBaseProtos.Cell> values = proto.getCellList();
List<CellProtos.Cell> values = proto.getCellList();
if (cells == null) cells = new ArrayList<Cell>(values.size());
for (HBaseProtos.Cell c: values) {
for (CellProtos.Cell c: values) {
cells.add(toCell(c));
}
return new Result(cells);
@@ -1139,7 +1141,7 @@ public final class ProtobufUtil {
* @return the converted Filter
*/
@SuppressWarnings("unchecked")
public static Filter toFilter(HBaseProtos.Filter proto) throws IOException {
public static Filter toFilter(FilterProtos.Filter proto) throws IOException {
String type = proto.getName();
final byte [] value = proto.getSerializedFilter().toByteArray();
String funcName = "parseFrom";
@@ -1162,8 +1164,8 @@ public final class ProtobufUtil {
* @param filter the Filter to convert
* @return the converted protocol buffer Filter
*/
public static HBaseProtos.Filter toFilter(Filter filter) throws IOException {
HBaseProtos.Filter.Builder builder = HBaseProtos.Filter.newBuilder();
public static FilterProtos.Filter toFilter(Filter filter) throws IOException {
FilterProtos.Filter.Builder builder = FilterProtos.Filter.newBuilder();
builder.setName(filter.getClass().getName());
builder.setSerializedFilter(ByteString.copyFrom(filter.toByteArray()));
return builder.build();
@@ -1960,23 +1962,23 @@ public final class ProtobufUtil {
throw new IOException(se);
}
public static HBaseProtos.Cell toCell(final Cell kv) {
public static CellProtos.Cell toCell(final Cell kv) {
// Doing this is going to kill us if we do it for all data passed.
// St.Ack 20121205
HBaseProtos.Cell.Builder kvbuilder = HBaseProtos.Cell.newBuilder();
CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder();
kvbuilder.setRow(ByteString.copyFrom(kv.getRowArray(), kv.getRowOffset(),
kv.getRowLength()));
kvbuilder.setFamily(ByteString.copyFrom(kv.getFamilyArray(),
kv.getFamilyOffset(), kv.getFamilyLength()));
kvbuilder.setQualifier(ByteString.copyFrom(kv.getQualifierArray(),
kv.getQualifierOffset(), kv.getQualifierLength()));
kvbuilder.setCellType(HBaseProtos.CellType.valueOf(kv.getTypeByte()));
kvbuilder.setCellType(CellProtos.CellType.valueOf(kv.getTypeByte()));
kvbuilder.setTimestamp(kv.getTimestamp());
kvbuilder.setValue(ByteString.copyFrom(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
return kvbuilder.build();
}
public static Cell toCell(final HBaseProtos.Cell cell) {
public static Cell toCell(final CellProtos.Cell cell) {
// Doing this is going to kill us if we do it for all data passed.
// St.Ack 20121205
return CellUtil.createCell(cell.getRow().toByteArray(),

View File

@@ -141,8 +141,8 @@ public class MetaRegionTracker extends ZooKeeperNodeTracker {
HBaseProtos.ServerName pbsn =
HBaseProtos.ServerName.newBuilder().setHostName(sn.getHostname()).
setPort(sn.getPort()).setStartCode(sn.getStartcode()).build();
ZooKeeperProtos.RootRegionServer pbrsr =
ZooKeeperProtos.RootRegionServer.newBuilder().setServer(pbsn).build();
ZooKeeperProtos.MetaRegionServer pbrsr =
ZooKeeperProtos.MetaRegionServer.newBuilder().setServer(pbsn).build();
return ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
}

View File

@@ -530,8 +530,8 @@ public final class ClientProtos {
// optional .Filter filter = 4;
boolean hasFilter();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder();
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter();
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder();
// optional .TimeRange timeRange = 5;
boolean hasTimeRange();
@@ -637,14 +637,14 @@ public final class ClientProtos {
// optional .Filter filter = 4;
public static final int FILTER_FIELD_NUMBER = 4;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter filter_;
private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_;
public boolean hasFilter() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
return filter_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
return filter_;
}
@@ -705,7 +705,7 @@ public final class ClientProtos {
row_ = com.google.protobuf.ByteString.EMPTY;
column_ = java.util.Collections.emptyList();
attribute_ = java.util.Collections.emptyList();
filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
maxVersions_ = 1;
cacheBlocks_ = true;
@@ -1060,7 +1060,7 @@ public final class ClientProtos {
attributeBuilder_.clear();
}
if (filterBuilder_ == null) {
filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
} else {
filterBuilder_.clear();
}
@@ -1331,7 +1331,7 @@ public final class ClientProtos {
break;
}
case 34: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.newBuilder();
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.newBuilder();
if (hasFilter()) {
subBuilder.mergeFrom(getFilter());
}
@@ -1771,20 +1771,20 @@ public final class ClientProtos {
}
// optional .Filter filter = 4;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder> filterBuilder_;
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder> filterBuilder_;
public boolean hasFilter() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
if (filterBuilder_ == null) {
return filter_;
} else {
return filterBuilder_.getMessage();
}
}
public Builder setFilter(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter value) {
public Builder setFilter(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter value) {
if (filterBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -1798,7 +1798,7 @@ public final class ClientProtos {
return this;
}
public Builder setFilter(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder builderForValue) {
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder builderForValue) {
if (filterBuilder_ == null) {
filter_ = builderForValue.build();
onChanged();
@@ -1808,12 +1808,12 @@ public final class ClientProtos {
bitField0_ |= 0x00000008;
return this;
}
public Builder mergeFilter(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter value) {
public Builder mergeFilter(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter value) {
if (filterBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
filter_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance()) {
filter_ != org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance()) {
filter_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.newBuilder(filter_).mergeFrom(value).buildPartial();
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.newBuilder(filter_).mergeFrom(value).buildPartial();
} else {
filter_ = value;
}
@@ -1826,7 +1826,7 @@ public final class ClientProtos {
}
public Builder clearFilter() {
if (filterBuilder_ == null) {
filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
onChanged();
} else {
filterBuilder_.clear();
@@ -1834,12 +1834,12 @@ public final class ClientProtos {
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder getFilterBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder getFilterBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getFilterFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
if (filterBuilder_ != null) {
return filterBuilder_.getMessageOrBuilder();
} else {
@@ -1847,11 +1847,11 @@ public final class ClientProtos {
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder>
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder>
getFilterFieldBuilder() {
if (filterBuilder_ == null) {
filterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder>(
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder>(
filter_,
getParentForChildren(),
isClean());
@@ -2049,13 +2049,13 @@ public final class ClientProtos {
extends com.google.protobuf.MessageOrBuilder {
// repeated .Cell cell = 1;
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell>
java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell>
getCellList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell getCell(int index);
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell getCell(int index);
int getCellCount();
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>
getCellOrBuilderList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder getCellOrBuilder(
org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder getCellOrBuilder(
int index);
// optional int32 associatedCellCount = 2;
@@ -2093,21 +2093,21 @@ public final class ClientProtos {
private int bitField0_;
// repeated .Cell cell = 1;
public static final int CELL_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> cell_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> getCellList() {
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> cell_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> getCellList() {
return cell_;
}
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>
getCellOrBuilderList() {
return cell_;
}
public int getCellCount() {
return cell_.size();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell getCell(int index) {
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell getCell(int index) {
return cell_.get(index);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder getCellOrBuilder(
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder getCellOrBuilder(
int index) {
return cell_.get(index);
}
@@ -2461,7 +2461,7 @@ public final class ClientProtos {
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.newBuilder();
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addCell(subBuilder.buildPartial());
break;
@@ -2478,19 +2478,19 @@ public final class ClientProtos {
private int bitField0_;
// repeated .Cell cell = 1;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> cell_ =
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> cell_ =
java.util.Collections.emptyList();
private void ensureCellIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
cell_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell>(cell_);
cell_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell>(cell_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder> cellBuilder_;
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder> cellBuilder_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> getCellList() {
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> getCellList() {
if (cellBuilder_ == null) {
return java.util.Collections.unmodifiableList(cell_);
} else {
@@ -2504,7 +2504,7 @@ public final class ClientProtos {
return cellBuilder_.getCount();
}
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell getCell(int index) {
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell getCell(int index) {
if (cellBuilder_ == null) {
return cell_.get(index);
} else {
@@ -2512,7 +2512,7 @@ public final class ClientProtos {
}
}
public Builder setCell(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell value) {
int index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell value) {
if (cellBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -2526,7 +2526,7 @@ public final class ClientProtos {
return this;
}
public Builder setCell(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder builderForValue) {
int index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) {
if (cellBuilder_ == null) {
ensureCellIsMutable();
cell_.set(index, builderForValue.build());
@@ -2536,7 +2536,7 @@ public final class ClientProtos {
}
return this;
}
public Builder addCell(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell value) {
public Builder addCell(org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell value) {
if (cellBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -2550,7 +2550,7 @@ public final class ClientProtos {
return this;
}
public Builder addCell(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell value) {
int index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell value) {
if (cellBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -2564,7 +2564,7 @@ public final class ClientProtos {
return this;
}
public Builder addCell(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder builderForValue) {
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) {
if (cellBuilder_ == null) {
ensureCellIsMutable();
cell_.add(builderForValue.build());
@@ -2575,7 +2575,7 @@ public final class ClientProtos {
return this;
}
public Builder addCell(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder builderForValue) {
int index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) {
if (cellBuilder_ == null) {
ensureCellIsMutable();
cell_.add(index, builderForValue.build());
@@ -2586,7 +2586,7 @@ public final class ClientProtos {
return this;
}
public Builder addAllCell(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> values) {
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> values) {
if (cellBuilder_ == null) {
ensureCellIsMutable();
super.addAll(values, cell_);
@@ -2616,18 +2616,18 @@ public final class ClientProtos {
}
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder getCellBuilder(
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder getCellBuilder(
int index) {
return getCellFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder getCellOrBuilder(
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder getCellOrBuilder(
int index) {
if (cellBuilder_ == null) {
return cell_.get(index); } else {
return cellBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>
getCellOrBuilderList() {
if (cellBuilder_ != null) {
return cellBuilder_.getMessageOrBuilderList();
@@ -2635,25 +2635,25 @@ public final class ClientProtos {
return java.util.Collections.unmodifiableList(cell_);
}
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder addCellBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder addCellBuilder() {
return getCellFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.getDefaultInstance());
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.getDefaultInstance());
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder addCellBuilder(
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder addCellBuilder(
int index) {
return getCellFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.getDefaultInstance());
index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder>
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder>
getCellBuilderList() {
return getCellFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>
getCellFieldBuilder() {
if (cellBuilder_ == null) {
cellBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>(
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>(
cell_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
@@ -10587,8 +10587,8 @@ public final class ClientProtos {
// optional .Filter filter = 5;
boolean hasFilter();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder();
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter();
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder();
// optional .TimeRange timeRange = 6;
boolean hasTimeRange();
@@ -10724,14 +10724,14 @@ public final class ClientProtos {
// optional .Filter filter = 5;
public static final int FILTER_FIELD_NUMBER = 5;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter filter_;
private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_;
public boolean hasFilter() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
return filter_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
return filter_;
}
@@ -10843,7 +10843,7 @@ public final class ClientProtos {
attribute_ = java.util.Collections.emptyList();
startRow_ = com.google.protobuf.ByteString.EMPTY;
stopRow_ = com.google.protobuf.ByteString.EMPTY;
filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
maxVersions_ = 1;
cacheBlocks_ = true;
@@ -11297,7 +11297,7 @@ public final class ClientProtos {
stopRow_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000008);
if (filterBuilder_ == null) {
filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
} else {
filterBuilder_.clear();
}
@@ -11621,7 +11621,7 @@ public final class ClientProtos {
break;
}
case 42: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.newBuilder();
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.newBuilder();
if (hasFilter()) {
subBuilder.mergeFrom(getFilter());
}
@@ -12110,20 +12110,20 @@ public final class ClientProtos {
}
// optional .Filter filter = 5;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder> filterBuilder_;
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder> filterBuilder_;
public boolean hasFilter() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
if (filterBuilder_ == null) {
return filter_;
} else {
return filterBuilder_.getMessage();
}
}
public Builder setFilter(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter value) {
public Builder setFilter(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter value) {
if (filterBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -12137,7 +12137,7 @@ public final class ClientProtos {
return this;
}
public Builder setFilter(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder builderForValue) {
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder builderForValue) {
if (filterBuilder_ == null) {
filter_ = builderForValue.build();
onChanged();
@@ -12147,12 +12147,12 @@ public final class ClientProtos {
bitField0_ |= 0x00000010;
return this;
}
public Builder mergeFilter(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter value) {
public Builder mergeFilter(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter value) {
if (filterBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
filter_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance()) {
filter_ != org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance()) {
filter_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.newBuilder(filter_).mergeFrom(value).buildPartial();
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.newBuilder(filter_).mergeFrom(value).buildPartial();
} else {
filter_ = value;
}
@@ -12165,7 +12165,7 @@ public final class ClientProtos {
}
public Builder clearFilter() {
if (filterBuilder_ == null) {
filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
onChanged();
} else {
filterBuilder_.clear();
@@ -12173,12 +12173,12 @@ public final class ClientProtos {
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder getFilterBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder getFilterBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getFilterFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
if (filterBuilder_ != null) {
return filterBuilder_.getMessageOrBuilder();
} else {
@@ -12186,11 +12186,11 @@ public final class ClientProtos {
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder>
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder>
getFilterFieldBuilder() {
if (filterBuilder_ == null) {
filterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder>(
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder>(
filter_,
getParentForChildren(),
isClean());
@@ -21554,99 +21554,100 @@ public final class ClientProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\014Client.proto\032\013hbase.proto\032\020Comparator." +
"proto\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tquali" +
"fier\030\002 \003(\014\"\342\001\n\003Get\022\013\n\003row\030\001 \002(\014\022\027\n\006colum" +
"n\030\002 \003(\0132\007.Column\022!\n\tattribute\030\003 \003(\0132\016.Na" +
"meBytesPair\022\027\n\006filter\030\004 \001(\0132\007.Filter\022\035\n\t" +
"timeRange\030\005 \001(\0132\n.TimeRange\022\026\n\013maxVersio" +
"ns\030\006 \001(\r:\0011\022\031\n\013cacheBlocks\030\007 \001(\010:\004true\022\022" +
"\n\nstoreLimit\030\010 \001(\r\022\023\n\013storeOffset\030\t \001(\r\"" +
":\n\006Result\022\023\n\004cell\030\001 \003(\0132\005.Cell\022\033\n\023associ" +
"atedCellCount\030\002 \001(\005\"r\n\nGetRequest\022 \n\006reg",
"ion\030\001 \002(\0132\020.RegionSpecifier\022\021\n\003get\030\002 \002(\013" +
"2\004.Get\022\030\n\020closestRowBefore\030\003 \001(\010\022\025\n\rexis" +
"tenceOnly\030\004 \001(\010\"w\n\017MultiGetRequest\022 \n\006re" +
"gion\030\001 \002(\0132\020.RegionSpecifier\022\021\n\003get\030\002 \003(" +
"\0132\004.Get\022\030\n\020closestRowBefore\030\003 \001(\010\022\025\n\rexi" +
"stenceOnly\030\004 \001(\010\"6\n\013GetResponse\022\027\n\006resul" +
"t\030\001 \001(\0132\007.Result\022\016\n\006exists\030\002 \001(\010\";\n\020Mult" +
"iGetResponse\022\027\n\006result\030\001 \003(\0132\007.Result\022\016\n" +
"\006exists\030\002 \003(\010\"\177\n\tCondition\022\013\n\003row\030\001 \002(\014\022" +
"\016\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022!\n\013co",
"mpareType\030\004 \002(\0162\014.CompareType\022\037\n\ncompara" +
"tor\030\005 \002(\0132\013.Comparator\"\365\005\n\rMutationProto" +
"\022\013\n\003row\030\001 \001(\014\022/\n\nmutateType\030\002 \001(\0162\033.Muta" +
"tionProto.MutationType\022/\n\013columnValue\030\003 " +
"\003(\0132\032.MutationProto.ColumnValue\022\021\n\ttimes" +
"tamp\030\004 \001(\004\022!\n\tattribute\030\005 \003(\0132\016.NameByte" +
"sPair\022:\n\ndurability\030\006 \001(\0162\031.MutationProt" +
"o.Durability:\013USE_DEFAULT\022\035\n\ttimeRange\030\007" +
" \001(\0132\n.TimeRange\022\033\n\023associatedCellCount\030" +
"\010 \001(\005\032\326\001\n\013ColumnValue\022\016\n\006family\030\001 \002(\014\022A\n",
"\016qualifierValue\030\002 \003(\0132).MutationProto.Co" +
"lumnValue.QualifierValue\032t\n\016QualifierVal" +
"ue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\t" +
"timestamp\030\003 \001(\004\022-\n\ndeleteType\030\004 \001(\0162\031.Mu" +
"tationProto.DeleteType\"W\n\nDurability\022\017\n\013" +
"USE_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL" +
"\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014Mutat" +
"ionType\022\n\n\006APPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PU" +
"T\020\002\022\n\n\006DELETE\020\003\"U\n\nDeleteType\022\026\n\022DELETE_" +
"ONE_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERSION",
"S\020\001\022\021\n\rDELETE_FAMILY\020\002\"r\n\rMutateRequest\022" +
" \n\006region\030\001 \002(\0132\020.RegionSpecifier\022 \n\010mut" +
"ation\030\002 \002(\0132\016.MutationProto\022\035\n\tcondition" +
"\030\003 \001(\0132\n.Condition\"<\n\016MutateResponse\022\027\n\006" +
"result\030\001 \001(\0132\007.Result\022\021\n\tprocessed\030\002 \001(\010" +
"\"\362\002\n\004Scan\022\027\n\006column\030\001 \003(\0132\007.Column\022!\n\tat" +
"tribute\030\002 \003(\0132\016.NameBytesPair\022\020\n\010startRo" +
"w\030\003 \001(\014\022\017\n\007stopRow\030\004 \001(\014\022\027\n\006filter\030\005 \001(\013" +
"2\007.Filter\022\035\n\ttimeRange\030\006 \001(\0132\n.TimeRange" +
"\022\026\n\013maxVersions\030\007 \001(\r:\0011\022\031\n\013cacheBlocks\030",
"\010 \001(\010:\004true\022\021\n\tbatchSize\030\t \001(\r\022\025\n\rmaxRes" +
"ultSize\030\n \001(\004\022\022\n\nstoreLimit\030\013 \001(\r\022\023\n\013sto" +
"reOffset\030\014 \001(\r\022\"\n\032loadColumnFamiliesOnDe" +
"mand\030\r \001(\010\022\024\n\014cachingCount\030\016 \001(\r\022\023\n\013pref" +
"etching\030\017 \001(\010\"\230\001\n\013ScanRequest\022 \n\006region\030" +
"\001 \001(\0132\020.RegionSpecifier\022\023\n\004scan\030\002 \001(\0132\005." +
"Scan\022\021\n\tscannerId\030\003 \001(\004\022\024\n\014numberOfRows\030" +
"\004 \001(\r\022\024\n\014closeScanner\030\005 \001(\010\022\023\n\013nextCallS" +
"eq\030\006 \001(\004\"l\n\014ScanResponse\022\'\n\016resultCellMe" +
"ta\030\001 \001(\0132\017.ResultCellMeta\022\021\n\tscannerId\030\002",
" \001(\004\022\023\n\013moreResults\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\"%" +
"\n\016ResultCellMeta\022\023\n\013cellsLength\030\001 \003(\r\"\260\001" +
"\n\024BulkLoadHFileRequest\022 \n\006region\030\001 \002(\0132\020" +
".RegionSpecifier\0224\n\nfamilyPath\030\002 \003(\0132 .B" +
"ulkLoadHFileRequest.FamilyPath\022\024\n\014assign" +
"SeqNum\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006family\030\001 \002" +
"(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileRespons" +
"e\022\016\n\006loaded\030\001 \002(\010\"_\n\026CoprocessorServiceC" +
"all\022\013\n\003row\030\001 \002(\014\022\023\n\013serviceName\030\002 \002(\t\022\022\n" +
"\nmethodName\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031Co",
"processorServiceRequest\022 \n\006region\030\001 \002(\0132" +
"\020.RegionSpecifier\022%\n\004call\030\002 \002(\0132\027.Coproc" +
"essorServiceCall\"]\n\032CoprocessorServiceRe" +
"sponse\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" +
"\022\035\n\005value\030\002 \002(\0132\016.NameBytesPair\"B\n\013Multi" +
"Action\022 \n\010mutation\030\001 \001(\0132\016.MutationProto" +
"\022\021\n\003get\030\002 \001(\0132\004.Get\"I\n\014ActionResult\022\026\n\005v" +
"alue\030\001 \001(\0132\007.Result\022!\n\texception\030\002 \001(\0132\016" +
".NameBytesPair\"^\n\014MultiRequest\022 \n\006region" +
"\030\001 \002(\0132\020.RegionSpecifier\022\034\n\006action\030\002 \003(\013",
"2\014.MultiAction\022\016\n\006atomic\030\003 \001(\010\".\n\rMultiR" +
"esponse\022\035\n\006result\030\001 \003(\0132\r.ActionResult2\342" +
"\002\n\rClientService\022 \n\003get\022\013.GetRequest\032\014.G" +
"etResponse\022/\n\010multiGet\022\020.MultiGetRequest" +
"\032\021.MultiGetResponse\022)\n\006mutate\022\016.MutateRe" +
"quest\032\017.MutateResponse\022#\n\004scan\022\014.ScanReq" +
"uest\032\r.ScanResponse\022>\n\rbulkLoadHFile\022\025.B" +
"ulkLoadHFileRequest\032\026.BulkLoadHFileRespo" +
"nse\022F\n\013execService\022\032.CoprocessorServiceR" +
"equest\032\033.CoprocessorServiceResponse\022&\n\005m",
"ulti\022\r.MultiRequest\032\016.MultiResponseBB\n*o" +
"rg.apache.hadoop.hbase.protobuf.generate" +
"dB\014ClientProtosH\001\210\001\001\240\001\001"
"\n\014Client.proto\032\013hbase.proto\032\014Filter.prot" +
"o\032\nCell.proto\032\020Comparator.proto\"+\n\006Colum" +
"n\022\016\n\006family\030\001 \002(\014\022\021\n\tqualifier\030\002 \003(\014\"\342\001\n" +
"\003Get\022\013\n\003row\030\001 \002(\014\022\027\n\006column\030\002 \003(\0132\007.Colu" +
"mn\022!\n\tattribute\030\003 \003(\0132\016.NameBytesPair\022\027\n" +
"\006filter\030\004 \001(\0132\007.Filter\022\035\n\ttimeRange\030\005 \001(" +
"\0132\n.TimeRange\022\026\n\013maxVersions\030\006 \001(\r:\0011\022\031\n" +
"\013cacheBlocks\030\007 \001(\010:\004true\022\022\n\nstoreLimit\030\010" +
" \001(\r\022\023\n\013storeOffset\030\t \001(\r\":\n\006Result\022\023\n\004c" +
"ell\030\001 \003(\0132\005.Cell\022\033\n\023associatedCellCount\030",
"\002 \001(\005\"r\n\nGetRequest\022 \n\006region\030\001 \002(\0132\020.Re" +
"gionSpecifier\022\021\n\003get\030\002 \002(\0132\004.Get\022\030\n\020clos" +
"estRowBefore\030\003 \001(\010\022\025\n\rexistenceOnly\030\004 \001(" +
"\010\"w\n\017MultiGetRequest\022 \n\006region\030\001 \002(\0132\020.R" +
"egionSpecifier\022\021\n\003get\030\002 \003(\0132\004.Get\022\030\n\020clo" +
"sestRowBefore\030\003 \001(\010\022\025\n\rexistenceOnly\030\004 \001" +
"(\010\"6\n\013GetResponse\022\027\n\006result\030\001 \001(\0132\007.Resu" +
"lt\022\016\n\006exists\030\002 \001(\010\";\n\020MultiGetResponse\022\027" +
"\n\006result\030\001 \003(\0132\007.Result\022\016\n\006exists\030\002 \003(\010\"" +
"\177\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(",
"\014\022\021\n\tqualifier\030\003 \002(\014\022!\n\013compareType\030\004 \002(" +
"\0162\014.CompareType\022\037\n\ncomparator\030\005 \002(\0132\013.Co" +
"mparator\"\365\005\n\rMutationProto\022\013\n\003row\030\001 \001(\014\022" +
"/\n\nmutateType\030\002 \001(\0162\033.MutationProto.Muta" +
"tionType\022/\n\013columnValue\030\003 \003(\0132\032.Mutation" +
"Proto.ColumnValue\022\021\n\ttimestamp\030\004 \001(\004\022!\n\t" +
"attribute\030\005 \003(\0132\016.NameBytesPair\022:\n\ndurab" +
"ility\030\006 \001(\0162\031.MutationProto.Durability:\013" +
"USE_DEFAULT\022\035\n\ttimeRange\030\007 \001(\0132\n.TimeRan" +
"ge\022\033\n\023associatedCellCount\030\010 \001(\005\032\326\001\n\013Colu",
"mnValue\022\016\n\006family\030\001 \002(\014\022A\n\016qualifierValu" +
"e\030\002 \003(\0132).MutationProto.ColumnValue.Qual" +
"ifierValue\032t\n\016QualifierValue\022\021\n\tqualifie" +
"r\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(" +
"\004\022-\n\ndeleteType\030\004 \001(\0162\031.MutationProto.De" +
"leteType\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022" +
"\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL" +
"\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006APP" +
"END\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020" +
"\003\"U\n\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000\022",
"\034\n\030DELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_" +
"FAMILY\020\002\"r\n\rMutateRequest\022 \n\006region\030\001 \002(" +
"\0132\020.RegionSpecifier\022 \n\010mutation\030\002 \002(\0132\016." +
"MutationProto\022\035\n\tcondition\030\003 \001(\0132\n.Condi" +
"tion\"<\n\016MutateResponse\022\027\n\006result\030\001 \001(\0132\007" +
".Result\022\021\n\tprocessed\030\002 \001(\010\"\362\002\n\004Scan\022\027\n\006c" +
"olumn\030\001 \003(\0132\007.Column\022!\n\tattribute\030\002 \003(\0132" +
"\016.NameBytesPair\022\020\n\010startRow\030\003 \001(\014\022\017\n\007sto" +
"pRow\030\004 \001(\014\022\027\n\006filter\030\005 \001(\0132\007.Filter\022\035\n\tt" +
"imeRange\030\006 \001(\0132\n.TimeRange\022\026\n\013maxVersion",
"s\030\007 \001(\r:\0011\022\031\n\013cacheBlocks\030\010 \001(\010:\004true\022\021\n" +
"\tbatchSize\030\t \001(\r\022\025\n\rmaxResultSize\030\n \001(\004\022" +
"\022\n\nstoreLimit\030\013 \001(\r\022\023\n\013storeOffset\030\014 \001(\r" +
"\022\"\n\032loadColumnFamiliesOnDemand\030\r \001(\010\022\024\n\014" +
"cachingCount\030\016 \001(\r\022\023\n\013prefetching\030\017 \001(\010\"" +
"\230\001\n\013ScanRequest\022 \n\006region\030\001 \001(\0132\020.Region" +
"Specifier\022\023\n\004scan\030\002 \001(\0132\005.Scan\022\021\n\tscanne" +
"rId\030\003 \001(\004\022\024\n\014numberOfRows\030\004 \001(\r\022\024\n\014close" +
"Scanner\030\005 \001(\010\022\023\n\013nextCallSeq\030\006 \001(\004\"l\n\014Sc" +
"anResponse\022\'\n\016resultCellMeta\030\001 \001(\0132\017.Res",
"ultCellMeta\022\021\n\tscannerId\030\002 \001(\004\022\023\n\013moreRe" +
"sults\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\"%\n\016ResultCellMe" +
"ta\022\023\n\013cellsLength\030\001 \003(\r\"\260\001\n\024BulkLoadHFil" +
"eRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" +
"er\0224\n\nfamilyPath\030\002 \003(\0132 .BulkLoadHFileRe" +
"quest.FamilyPath\022\024\n\014assignSeqNum\030\003 \001(\010\032*" +
"\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002" +
"(\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded\030\001 " +
"\002(\010\"_\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002" +
"(\014\022\023\n\013serviceName\030\002 \002(\t\022\022\n\nmethodName\030\003 ",
"\002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031CoprocessorServi" +
"ceRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif" +
"ier\022%\n\004call\030\002 \002(\0132\027.CoprocessorServiceCa" +
"ll\"]\n\032CoprocessorServiceResponse\022 \n\006regi" +
"on\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002(" +
"\0132\016.NameBytesPair\"B\n\013MultiAction\022 \n\010muta" +
"tion\030\001 \001(\0132\016.MutationProto\022\021\n\003get\030\002 \001(\0132" +
"\004.Get\"I\n\014ActionResult\022\026\n\005value\030\001 \001(\0132\007.R" +
"esult\022!\n\texception\030\002 \001(\0132\016.NameBytesPair" +
"\"^\n\014MultiRequest\022 \n\006region\030\001 \002(\0132\020.Regio",
"nSpecifier\022\034\n\006action\030\002 \003(\0132\014.MultiAction" +
"\022\016\n\006atomic\030\003 \001(\010\".\n\rMultiResponse\022\035\n\006res" +
"ult\030\001 \003(\0132\r.ActionResult2\342\002\n\rClientServi" +
"ce\022 \n\003get\022\013.GetRequest\032\014.GetResponse\022/\n\010" +
"multiGet\022\020.MultiGetRequest\032\021.MultiGetRes" +
"ponse\022)\n\006mutate\022\016.MutateRequest\032\017.Mutate" +
"Response\022#\n\004scan\022\014.ScanRequest\032\r.ScanRes" +
"ponse\022>\n\rbulkLoadHFile\022\025.BulkLoadHFileRe" +
"quest\032\026.BulkLoadHFileResponse\022F\n\013execSer" +
"vice\022\032.CoprocessorServiceRequest\032\033.Copro",
"cessorServiceResponse\022&\n\005multi\022\r.MultiRe" +
"quest\032\016.MultiResponseBB\n*org.apache.hado" +
"op.hbase.protobuf.generatedB\014ClientProto" +
"sH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -21876,6 +21877,8 @@ public final class ClientProtos {
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.CellProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.getDescriptor(),
}, assigner);
}

View File

@@ -1138,8 +1138,8 @@ public final class RegionServerStatusProtos {
// optional .ServerLoad load = 2;
boolean hasLoad();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder();
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad();
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder();
}
public static final class RegionServerReportRequest extends
com.google.protobuf.GeneratedMessage
@@ -1185,20 +1185,20 @@ public final class RegionServerStatusProtos {
// optional .ServerLoad load = 2;
public static final int LOAD_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad load_;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad load_;
public boolean hasLoad() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad() {
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad() {
return load_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder() {
return load_;
}
private void initFields() {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -1424,7 +1424,7 @@ public final class RegionServerStatusProtos {
}
bitField0_ = (bitField0_ & ~0x00000001);
if (loadBuilder_ == null) {
load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
} else {
loadBuilder_.clear();
}
@@ -1560,7 +1560,7 @@ public final class RegionServerStatusProtos {
break;
}
case 18: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder();
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder();
if (hasLoad()) {
subBuilder.mergeFrom(getLoad());
}
@@ -1665,20 +1665,20 @@ public final class RegionServerStatusProtos {
}
// optional .ServerLoad load = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder> loadBuilder_;
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder> loadBuilder_;
public boolean hasLoad() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad() {
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad() {
if (loadBuilder_ == null) {
return load_;
} else {
return loadBuilder_.getMessage();
}
}
public Builder setLoad(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad value) {
public Builder setLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
if (loadBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -1692,7 +1692,7 @@ public final class RegionServerStatusProtos {
return this;
}
public Builder setLoad(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder builderForValue) {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder builderForValue) {
if (loadBuilder_ == null) {
load_ = builderForValue.build();
onChanged();
@@ -1702,12 +1702,12 @@ public final class RegionServerStatusProtos {
bitField0_ |= 0x00000002;
return this;
}
public Builder mergeLoad(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad value) {
public Builder mergeLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
if (loadBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
load_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance()) {
load_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
load_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
} else {
load_ = value;
}
@@ -1720,7 +1720,7 @@ public final class RegionServerStatusProtos {
}
public Builder clearLoad() {
if (loadBuilder_ == null) {
load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
onChanged();
} else {
loadBuilder_.clear();
@ -1728,12 +1728,12 @@ public final class RegionServerStatusProtos {
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder getLoadBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder getLoadBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getLoadFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder() {
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder() {
if (loadBuilder_ != null) {
return loadBuilder_.getMessageOrBuilder();
} else {
@ -1741,11 +1741,11 @@ public final class RegionServerStatusProtos {
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder>
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>
getLoadFieldBuilder() {
if (loadBuilder_ == null) {
loadBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder>(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>(
load_,
getParentForChildren(),
isClean());
@ -4175,31 +4175,32 @@ public final class RegionServerStatusProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\030RegionServerStatus.proto\032\013hbase.proto\"" +
"^\n\032RegionServerStartupRequest\022\014\n\004port\030\001 " +
"\002(\r\022\027\n\017serverStartCode\030\002 \002(\004\022\031\n\021serverCu" +
"rrentTime\030\003 \002(\004\"B\n\033RegionServerStartupRe" +
"sponse\022#\n\nmapEntries\030\001 \003(\0132\017.NameStringP" +
"air\"S\n\031RegionServerReportRequest\022\033\n\006serv" +
"er\030\001 \002(\0132\013.ServerName\022\031\n\004load\030\002 \001(\0132\013.Se" +
"rverLoad\"\034\n\032RegionServerReportResponse\"N" +
"\n\031ReportRSFatalErrorRequest\022\033\n\006server\030\001 " +
"\002(\0132\013.ServerName\022\024\n\014errorMessage\030\002 \002(\t\"\034",
"\n\032ReportRSFatalErrorResponse\"5\n\037GetLastF" +
"lushedSequenceIdRequest\022\022\n\nregionName\030\001 " +
"\002(\014\"A\n GetLastFlushedSequenceIdResponse\022" +
"\035\n\025lastFlushedSequenceId\030\001 \002(\0042\354\002\n\031Regio" +
"nServerStatusService\022P\n\023regionServerStar" +
"tup\022\033.RegionServerStartupRequest\032\034.Regio" +
"nServerStartupResponse\022M\n\022regionServerRe" +
"port\022\032.RegionServerReportRequest\032\033.Regio" +
"nServerReportResponse\022M\n\022reportRSFatalEr" +
"ror\022\032.ReportRSFatalErrorRequest\032\033.Report",
"RSFatalErrorResponse\022_\n\030getLastFlushedSe" +
"quenceId\022 .GetLastFlushedSequenceIdReque" +
"st\032!.GetLastFlushedSequenceIdResponseBN\n" +
"*org.apache.hadoop.hbase.protobuf.genera" +
"tedB\030RegionServerStatusProtosH\001\210\001\001\240\001\001"
"\n\030RegionServerStatus.proto\032\013hbase.proto\032" +
"\023ClusterStatus.proto\"^\n\032RegionServerStar" +
"tupRequest\022\014\n\004port\030\001 \002(\r\022\027\n\017serverStartC" +
"ode\030\002 \002(\004\022\031\n\021serverCurrentTime\030\003 \002(\004\"B\n\033" +
"RegionServerStartupResponse\022#\n\nmapEntrie" +
"s\030\001 \003(\0132\017.NameStringPair\"S\n\031RegionServer" +
"ReportRequest\022\033\n\006server\030\001 \002(\0132\013.ServerNa" +
"me\022\031\n\004load\030\002 \001(\0132\013.ServerLoad\"\034\n\032RegionS" +
"erverReportResponse\"N\n\031ReportRSFatalErro" +
"rRequest\022\033\n\006server\030\001 \002(\0132\013.ServerName\022\024\n",
"\014errorMessage\030\002 \002(\t\"\034\n\032ReportRSFatalErro" +
"rResponse\"5\n\037GetLastFlushedSequenceIdReq" +
"uest\022\022\n\nregionName\030\001 \002(\014\"A\n GetLastFlush" +
"edSequenceIdResponse\022\035\n\025lastFlushedSeque" +
"nceId\030\001 \002(\0042\354\002\n\031RegionServerStatusServic" +
"e\022P\n\023regionServerStartup\022\033.RegionServerS" +
"tartupRequest\032\034.RegionServerStartupRespo" +
"nse\022M\n\022regionServerReport\022\032.RegionServer" +
"ReportRequest\032\033.RegionServerReportRespon" +
"se\022M\n\022reportRSFatalError\022\032.ReportRSFatal",
"ErrorRequest\032\033.ReportRSFatalErrorRespons" +
"e\022_\n\030getLastFlushedSequenceId\022 .GetLastF" +
"lushedSequenceIdRequest\032!.GetLastFlushed" +
"SequenceIdResponseBN\n*org.apache.hadoop." +
"hbase.protobuf.generatedB\030RegionServerSt" +
"atusProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -4277,6 +4278,7 @@ public final class RegionServerStatusProtos {
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(),
}, assigner);
}
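With the regenerated builder above, callers now hand the report RPC a ClusterStatusProtos.ServerLoad rather than the old HBaseProtos variant. A minimal illustrative sketch (not part of this diff; the host name, port and counters are placeholder values) of building such a request:

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;

public class ReportRequestSketch {
  public static RegionServerReportRequest buildReport() {
    // ServerName is still defined in hbase.proto; only ServerLoad moved.
    HBaseProtos.ServerName server = HBaseProtos.ServerName.newBuilder()
        .setHostName("rs-example.example.org")
        .setPort(60020)
        .setStartCode(System.currentTimeMillis())
        .build();
    // ServerLoad now comes from the generated ClusterStatusProtos outer class.
    ClusterStatusProtos.ServerLoad load = ClusterStatusProtos.ServerLoad.newBuilder()
        .setNumberOfRequests(42)
        .setTotalNumberOfRequests(10000)
        .setUsedHeapMB(128)
        .setMaxHeapMB(1024)
        .build();
    return RegionServerReportRequest.newBuilder()
        .setServer(server)   // required field
        .setLoad(load)       // setLoad(...) accepts the relocated type
        .build();
  }
}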

View File

@ -8,7 +8,7 @@ public final class ZooKeeperProtos {
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface RootRegionServerOrBuilder
public interface MetaRegionServerOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .ServerName server = 1;
@ -16,32 +16,32 @@ public final class ZooKeeperProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
}
public static final class RootRegionServer extends
public static final class MetaRegionServer extends
com.google.protobuf.GeneratedMessage
implements RootRegionServerOrBuilder {
// Use RootRegionServer.newBuilder() to construct.
private RootRegionServer(Builder builder) {
implements MetaRegionServerOrBuilder {
// Use MetaRegionServer.newBuilder() to construct.
private MetaRegionServer(Builder builder) {
super(builder);
}
private RootRegionServer(boolean noInit) {}
private MetaRegionServer(boolean noInit) {}
private static final RootRegionServer defaultInstance;
public static RootRegionServer getDefaultInstance() {
private static final MetaRegionServer defaultInstance;
public static MetaRegionServer getDefaultInstance() {
return defaultInstance;
}
public RootRegionServer getDefaultInstanceForType() {
public MetaRegionServer getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor;
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable;
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable;
}
private int bitField0_;
@ -114,10 +114,10 @@ public final class ZooKeeperProtos {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)) {
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) obj;
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) obj;
boolean result = true;
result = result && (hasServer() == other.hasServer());
@ -142,41 +142,41 @@ public final class ZooKeeperProtos {
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(byte[] data)
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(java.io.InputStream input)
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(java.io.InputStream input)
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
@ -185,7 +185,7 @@ public final class ZooKeeperProtos {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@ -196,12 +196,12 @@ public final class ZooKeeperProtos {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@ -211,7 +211,7 @@ public final class ZooKeeperProtos {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer prototype) {
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@ -224,18 +224,18 @@ public final class ZooKeeperProtos {
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServerOrBuilder {
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServerOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor;
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable;
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.newBuilder()
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@ -270,24 +270,24 @@ public final class ZooKeeperProtos {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDescriptor();
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDescriptor();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance();
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial();
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildParsed()
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial();
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
@ -295,8 +295,8 @@ public final class ZooKeeperProtos {
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer(this);
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@ -313,16 +313,16 @@ public final class ZooKeeperProtos {
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)other);
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance()) return this;
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance()) return this;
if (other.hasServer()) {
mergeServer(other.getServer());
}
@ -470,15 +470,15 @@ public final class ZooKeeperProtos {
return serverBuilder_;
}
// @@protoc_insertion_point(builder_scope:RootRegionServer)
// @@protoc_insertion_point(builder_scope:MetaRegionServer)
}
static {
defaultInstance = new RootRegionServer(true);
defaultInstance = new MetaRegionServer(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RootRegionServer)
// @@protoc_insertion_point(class_scope:MetaRegionServer)
}
public interface MasterOrBuilder
@ -6868,10 +6868,10 @@ public final class ZooKeeperProtos {
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RootRegionServer_descriptor;
internal_static_MetaRegionServer_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RootRegionServer_fieldAccessorTable;
internal_static_MetaRegionServer_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_Master_descriptor;
private static
@ -6941,7 +6941,7 @@ public final class ZooKeeperProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020RootRe" +
"\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020MetaRe" +
"gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" +
"%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" +
"\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\203\001\n\020Regio" +
@ -6976,14 +6976,14 @@ public final class ZooKeeperProtos {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_RootRegionServer_descriptor =
internal_static_MetaRegionServer_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_RootRegionServer_fieldAccessorTable = new
internal_static_MetaRegionServer_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RootRegionServer_descriptor,
internal_static_MetaRegionServer_descriptor,
new java.lang.String[] { "Server", },
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.class,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.Builder.class);
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
internal_static_Master_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_Master_fieldAccessorTable = new

View File

@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Cell and KeyValue protos
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "CellProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
/**
* The type of the key in a Cell
*/
enum CellType {
MINIMUM = 0;
PUT = 4;
DELETE = 8;
DELETE_COLUMN = 12;
DELETE_FAMILY = 14;
// MAXIMUM is used when searching; you look from maximum on down.
MAXIMUM = 255;
}
/**
* Protocol buffer version of Cell.
*/
message Cell {
optional bytes row = 1;
optional bytes family = 2;
optional bytes qualifier = 3;
optional uint64 timestamp = 4;
optional CellType cellType = 5;
optional bytes value = 6;
}
/**
* Protocol buffer version of KeyValue.
* It doesn't have those transient parameters
*/
message KeyValue {
required bytes row = 1;
required bytes family = 2;
required bytes qualifier = 3;
optional uint64 timestamp = 4;
optional CellType keyType = 5;
optional bytes value = 6;
}
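With the Cell and KeyValue messages split out into Cell.proto, the generated outer class becomes CellProtos. A small hedged sketch (field values are placeholders) of building the relocated Cell message from Java, matching the usage in MessageCodec further down in this commit:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;

public class CellProtoSketch {
  public static CellProtos.Cell example() {
    // All fields of Cell are optional; the CellType enum also lives in CellProtos now.
    return CellProtos.Cell.newBuilder()
        .setRow(ByteString.copyFromUtf8("row1"))
        .setFamily(ByteString.copyFromUtf8("cf"))
        .setQualifier(ByteString.copyFromUtf8("q"))
        .setTimestamp(1L)
        .setCellType(CellProtos.CellType.PUT)
        .setValue(ByteString.copyFromUtf8("v1"))
        .build();
  }
}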

View File

@ -25,6 +25,8 @@ option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "hbase.proto";
import "Filter.proto";
import "Cell.proto";
import "Comparator.proto";
/**

View File

@ -51,11 +51,110 @@ message RegionInTransition {
required RegionState regionState = 2;
}
message RegionLoad {
/** the region specifier */
required RegionSpecifier regionSpecifier = 1;
/** the number of stores for the region */
optional uint32 stores = 2;
/** the number of storefiles for the region */
optional uint32 storefiles = 3;
/** the total size of the store files for the region, uncompressed, in MB */
optional uint32 storeUncompressedSizeMB = 4;
/** the current total size of the store files for the region, in MB */
optional uint32 storefileSizeMB = 5;
/** the current size of the memstore for the region, in MB */
optional uint32 memstoreSizeMB = 6;
/**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {@link #rootIndexSizeKB} but in MB.
*/
optional uint32 storefileIndexSizeMB = 7;
/** the current total read requests made to region */
optional uint64 readRequestsCount = 8;
/** the current total write requests made to region */
optional uint64 writeRequestsCount = 9;
/** the total compacting key values in currently running compaction */
optional uint64 totalCompactingKVs = 10;
/** the completed count of key values in currently running compaction */
optional uint64 currentCompactedKVs = 11;
/** The current total size of root-level indexes for the region, in KB. */
optional uint32 rootIndexSizeKB = 12;
/** The total size of all index blocks, not just the root level, in KB. */
optional uint32 totalStaticIndexSizeKB = 13;
/**
* The total size of all Bloom filter blocks, not just loaded into the
* block cache, in KB.
*/
optional uint32 totalStaticBloomSizeKB = 14;
/** the most recent sequence Id from cache flush */
optional uint64 completeSequenceId = 15;
}
/* Server-level protobufs */
message ServerLoad {
/** Number of requests since last report. */
optional uint32 numberOfRequests = 1;
/** Total Number of requests from the start of the region server. */
optional uint32 totalNumberOfRequests = 2;
/** the amount of used heap, in MB. */
optional uint32 usedHeapMB = 3;
/** the maximum allowable size of the heap, in MB. */
optional uint32 maxHeapMB = 4;
/** Information on the load of individual regions. */
repeated RegionLoad regionLoads = 5;
/**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
*/
repeated Coprocessor coprocessors = 6;
/**
* Time when incremental (non-total) counts began being calculated (e.g. numberOfRequests)
* time is measured as the difference, measured in milliseconds, between the current time
* and midnight, January 1, 1970 UTC.
*/
optional uint64 reportStartTime = 7;
/**
* Time when report was generated.
* time is measured as the difference, measured in milliseconds, between the current time
* and midnight, January 1, 1970 UTC.
*/
optional uint64 reportEndTime = 8;
/**
* The port number that this region server is hosting an info server on.
*/
optional uint32 infoServerPort = 9;
}
message LiveServerInfo {
required ServerName server = 1;
required ServerLoad serverLoad = 2;
}
message ClusterStatus {
optional HBaseVersionFileContent hbaseVersion = 1;
repeated LiveServerInfo liveServers = 2;
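RegionLoad and ServerLoad now generate into ClusterStatusProtos. A hedged sketch (the region name and counters are made up, mirroring the TestServerLoad change later in this commit) of populating the relocated messages:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class ServerLoadProtoSketch {
  public static ClusterStatusProtos.ServerLoad example() {
    // RegionSpecifier stays in hbase.proto and identifies the region being reported on.
    HBaseProtos.RegionSpecifier region = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFromUtf8("ASDFGHJKL"))
        .build();
    ClusterStatusProtos.RegionLoad regionLoad = ClusterStatusProtos.RegionLoad.newBuilder()
        .setRegionSpecifier(region)   // the only required field of RegionLoad
        .setStores(10)
        .setStorefiles(101)
        .setStorefileSizeMB(520)
        .build();
    return ClusterStatusProtos.ServerLoad.newBuilder()
        .addRegionLoads(regionLoad)
        .setNumberOfRequests(100)
        .build();
  }
}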

View File

@ -27,6 +27,11 @@ option optimize_for = SPEED;
import "hbase.proto";
import "Comparator.proto";
message Filter {
required string name = 1;
optional bytes serializedFilter = 2;
}
message ColumnCountGetFilter {
required int32 limit = 1;
}
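The generic Filter wrapper message moves here from hbase.proto, so its generated home is now FilterProtos. An illustrative sketch (assuming serializedFilter carries the concrete filter's own protobuf bytes, as ProtobufUtil produces; the limit value is arbitrary):

import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;

public class FilterProtoSketch {
  public static FilterProtos.Filter example() {
    // The concrete filter message is serialized and wrapped together with its class name.
    FilterProtos.ColumnCountGetFilter inner = FilterProtos.ColumnCountGetFilter.newBuilder()
        .setLimit(5)
        .build();
    return FilterProtos.Filter.newBuilder()
        .setName(org.apache.hadoop.hbase.filter.ColumnCountGetFilter.class.getName())
        .setSerializedFilter(inner.toByteString())
        .build();
  }
}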

View File

@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "hbase.proto";
import "ClusterStatus.proto";
message RegionServerStartupRequest {
/** Port number this regionserver is up on */

View File

@ -28,10 +28,10 @@ option optimize_for = SPEED;
import "hbase.proto";
/**
* Content of the root-region-server znode.
* Content of the meta-region-server znode.
*/
message RootRegionServer {
// The ServerName hosting the root region currently.
message MetaRegionServer {
// The ServerName hosting the meta region currently.
required ServerName server = 1;
}
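The renamed message is what gets written to and read from the meta-region-server znode. A minimal sketch (host and port are placeholders; the znode I/O itself is out of scope here):

import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class MetaZNodeSketch {
  // Serialize the server currently hosting the meta region.
  public static byte[] encode() {
    HBaseProtos.ServerName server = HBaseProtos.ServerName.newBuilder()
        .setHostName("meta-host.example.org")
        .setPort(60020)
        .build();
    return ZooKeeperProtos.MetaRegionServer.newBuilder()
        .setServer(server)
        .build()
        .toByteArray();
  }

  // Parse the znode content back into the renamed message.
  public static ZooKeeperProtos.MetaRegionServer decode(byte[] data)
      throws InvalidProtocolBufferException {
    return ZooKeeperProtos.MetaRegionServer.parseFrom(data);
  }
}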

View File

@ -23,32 +23,7 @@ option java_outer_classname = "HBaseProtos";
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
/**
* The type of the key in a Cell
*/
enum CellType {
MINIMUM = 0;
PUT = 4;
DELETE = 8;
DELETE_COLUMN = 12;
DELETE_FAMILY = 14;
// MAXIMUM is used when searching; you look from maximum on down.
MAXIMUM = 255;
}
/**
* Protocol buffer version of Cell.
*/
message Cell {
optional bytes row = 1;
optional bytes family = 2;
optional bytes qualifier = 3;
optional uint64 timestamp = 4;
optional CellType cellType = 5;
optional bytes value = 6;
}
import "Cell.proto";
/**
* Table Schema
@ -110,104 +85,6 @@ message RegionSpecifier {
}
}
message RegionLoad {
/** the region specifier */
required RegionSpecifier regionSpecifier = 1;
/** the number of stores for the region */
optional uint32 stores = 2;
/** the number of storefiles for the region */
optional uint32 storefiles = 3;
/** the total size of the store files for the region, uncompressed, in MB */
optional uint32 storeUncompressedSizeMB = 4;
/** the current total size of the store files for the region, in MB */
optional uint32 storefileSizeMB = 5;
/** the current size of the memstore for the region, in MB */
optional uint32 memstoreSizeMB = 6;
/**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {@link #rootIndexSizeKB} but in MB.
*/
optional uint32 storefileIndexSizeMB = 7;
/** the current total read requests made to region */
optional uint64 readRequestsCount = 8;
/** the current total write requests made to region */
optional uint64 writeRequestsCount = 9;
/** the total compacting key values in currently running compaction */
optional uint64 totalCompactingKVs = 10;
/** the completed count of key values in currently running compaction */
optional uint64 currentCompactedKVs = 11;
/** The current total size of root-level indexes for the region, in KB. */
optional uint32 rootIndexSizeKB = 12;
/** The total size of all index blocks, not just the root level, in KB. */
optional uint32 totalStaticIndexSizeKB = 13;
/**
* The total size of all Bloom filter blocks, not just loaded into the
* block cache, in KB.
*/
optional uint32 totalStaticBloomSizeKB = 14;
/** the most recent sequence Id from cache flush */
optional uint64 completeSequenceId = 15;
}
/* Server-level protobufs */
message ServerLoad {
/** Number of requests since last report. */
optional uint32 numberOfRequests = 1;
/** Total Number of requests from the start of the region server. */
optional uint32 totalNumberOfRequests = 2;
/** the amount of used heap, in MB. */
optional uint32 usedHeapMB = 3;
/** the maximum allowable size of the heap, in MB. */
optional uint32 maxHeapMB = 4;
/** Information on the load of individual regions. */
repeated RegionLoad regionLoads = 5;
/**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
*/
repeated Coprocessor coprocessors = 6;
/**
* Time when incremental (non-total) counts began being calculated (e.g. numberOfRequests)
* time is measured as the difference, measured in milliseconds, between the current time
* and midnight, January 1, 1970 UTC.
*/
optional uint64 reportStartTime = 7;
/**
* Time when report was generated.
* time is measured as the difference, measured in milliseconds, between the current time
* and midnight, January 1, 1970 UTC.
*/
optional uint64 reportEndTime = 8;
/**
* The port number that this region server is hosting an info server on.
*/
optional uint32 infoServerPort = 9;
}
/**
* A range of time. Both from and to are Java time
* stamp in milliseconds. If you don't specify a time
@ -219,11 +96,6 @@ message TimeRange {
optional uint64 to = 2;
}
message Filter {
required string name = 1;
optional bytes serializedFilter = 2;
}
/* Comparison operators */
enum CompareType {
LESS = 0;
@ -235,19 +107,6 @@ enum CompareType {
NO_OP = 6;
}
/**
* Protocol buffer version of KeyValue.
* It doesn't have those transient parameters
*/
message KeyValue {
required bytes row = 1;
required bytes family = 2;
required bytes qualifier = 3;
optional uint64 timestamp = 4;
optional CellType keyType = 5;
optional bytes value = 6;
}
/**
* Protocol buffer version of ServerName
*/

View File

@ -30,7 +30,7 @@ org.apache.hadoop.hbase.ServerName;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
</%import>
<%if format.equals("json") %>
<& ../common/TaskMonitorTmpl; filter = filter; format = "json" &>

View File

@ -29,7 +29,7 @@
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
</%import>
<%if (onlineRegions != null && onlineRegions.size() > 0) %>

View File

@ -29,7 +29,7 @@ org.apache.hadoop.hbase.ServerName;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
org.apache.hadoop.util.StringUtils;
com.yammer.metrics.stats.Snapshot;

View File

@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.codec.BaseDecoder;
import org.apache.hadoop.hbase.codec.BaseEncoder;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
import com.google.protobuf.ByteString;
import org.apache.hadoop.classification.InterfaceStability;
@ -47,7 +47,7 @@ public class MessageCodec implements Codec {
@Override
public void write(Cell cell) throws IOException {
checkFlushed();
HBaseProtos.Cell.Builder builder = HBaseProtos.Cell.newBuilder();
CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder();
// This copies bytes from Cell to ByteString. I don't see any way around the copy.
// ByteString is final.
builder.setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(),
@ -57,10 +57,10 @@ public class MessageCodec implements Codec {
builder.setQualifier(ByteString.copyFrom(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength()));
builder.setTimestamp(cell.getTimestamp());
builder.setCellType(HBaseProtos.CellType.valueOf(cell.getTypeByte()));
builder.setCellType(CellProtos.CellType.valueOf(cell.getTypeByte()));
builder.setValue(ByteString.copyFrom(cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength()));
HBaseProtos.Cell pbcell = builder.build();
CellProtos.Cell pbcell = builder.build();
pbcell.writeDelimitedTo(this.out);
}
}
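Since the codec now emits CellProtos.Cell messages, a hedged round-trip sketch (assuming the Codec encoder/decoder interfaces expose the usual write/flush and advance/current methods; the KeyValue contents are placeholders):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.MessageCodec;
import org.apache.hadoop.hbase.util.Bytes;

public class MessageCodecSketch {
  public static Cell roundTrip() throws Exception {
    Codec codec = new MessageCodec();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Codec.Encoder encoder = codec.getEncoder(out);
    // Each write serializes one CellProtos.Cell, delimited, onto the stream.
    encoder.write(new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes("v")));
    encoder.flush();
    Codec.Decoder decoder = codec.getDecoder(new ByteArrayInputStream(out.toByteArray()));
    return decoder.advance() ? decoder.current() : null;
  }
}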
@ -71,7 +71,7 @@ public class MessageCodec implements Codec {
}
protected Cell parseCell() throws IOException {
HBaseProtos.Cell pbcell = HBaseProtos.Cell.parseDelimitedFrom(this.in);
CellProtos.Cell pbcell = CellProtos.Cell.parseDelimitedFrom(this.in);
return CellUtil.createCell(pbcell.getRow().toByteArray(),
pbcell.getFamily().toByteArray(), pbcell.getQualifier().toByteArray(),
pbcell.getTimestamp(), (byte)pbcell.getCellType().getNumber(),

View File

@ -102,7 +102,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@ -1244,7 +1244,7 @@ MasterServices, Server {
public RegionServerReportResponse regionServerReport(
RpcController controller, RegionServerReportRequest request) throws ServiceException {
try {
HBaseProtos.ServerLoad sl = request.getLoad();
ClusterStatusProtos.ServerLoad sl = request.getLoad();
this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
if (sl != null && this.metricsMaster != null) {
// Up our metrics.

View File

@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;

View File

@ -170,10 +170,10 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
@ -978,7 +978,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
void tryRegionServerReport(long reportStartTime, long reportEndTime)
throws IOException {
HBaseProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
ClusterStatusProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
try {
RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
ServerName sn = ServerName.parseVersionedServerName(
@ -1000,7 +1000,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
}
}
HBaseProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
// We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
// per second, and other metrics. As long as metrics are part of ServerLoad it's best to use
// the wrapper to compute those numbers in one place.
@ -1013,7 +1013,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
MemoryUsage memory =
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
HBaseProtos.ServerLoad.Builder serverLoad = HBaseProtos.ServerLoad.newBuilder();
ClusterStatusProtos.ServerLoad.Builder serverLoad =
ClusterStatusProtos.ServerLoad.newBuilder();
serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond());
serverLoad.setTotalNumberOfRequests((int) regionServerWrapper.getTotalRequestCount());
serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024));

View File

@ -22,6 +22,7 @@ package org.apache.hadoop.hbase;
import static org.junit.Assert.*;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@ -56,7 +57,7 @@ public class TestServerLoad {
assertTrue(slToString.contains("coprocessors=[]"));
}
private HBaseProtos.ServerLoad createServerLoadProto() {
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
HBaseProtos.RegionSpecifier rSpecOne =
HBaseProtos.RegionSpecifier.newBuilder()
.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
@ -66,17 +67,18 @@ public class TestServerLoad {
.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
.setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
HBaseProtos.RegionLoad rlOne =
HBaseProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
ClusterStatusProtos.RegionLoad rlOne =
ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
.setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
.setStorefileIndexSizeMB(42).setRootIndexSizeKB(201).build();
HBaseProtos.RegionLoad rlTwo =
HBaseProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
ClusterStatusProtos.RegionLoad rlTwo =
ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
.setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
.setStorefileIndexSizeMB(40).setRootIndexSizeKB(303).build();
HBaseProtos.ServerLoad sl =
HBaseProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).addRegionLoads(rlTwo).build();
ClusterStatusProtos.ServerLoad sl =
ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).
addRegionLoads(rlTwo).build();
return sl;
}
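For context, a hedged sketch of how the ServerLoad wrapper consumes the proto built above (assuming the wrapper's aggregate getters keep their getRegionsLoad()/getStores() names; the expected numbers follow from the two RegionLoads in the test):

import static org.junit.Assert.assertEquals;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

public class ServerLoadWrapperSketch {
  public static void check(ClusterStatusProtos.ServerLoad proto) {
    ServerLoad sl = new ServerLoad(proto);
    assertEquals(2, sl.getRegionsLoad().size()); // one entry per RegionLoad added above
    assertEquals(13, sl.getStores());            // 10 + 3 summed across the two regions
  }
}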

View File

@ -18,23 +18,17 @@
*/
package org.apache.hadoop.hbase.filter;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertTrue;
/**
* Test for the ColumnPaginationFilter, used mainly to test the successful serialization of the filter.
* More test functionality can be found within {@link org.apache.hadoop.hbase.filter.TestFilter#testColumnPaginationFilter()}
@ -65,7 +59,7 @@ public class TestColumnPaginationFilter
}
private Filter serializationTest(Filter filter) throws Exception {
HBaseProtos.Filter filterProto = ProtobufUtil.toFilter(filter);
FilterProtos.Filter filterProto = ProtobufUtil.toFilter(filter);
Filter newFilter = ProtobufUtil.toFilter(filterProto);
return newFilter;

View File

@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
@ -72,7 +72,7 @@ public class TestMasterMetrics {
HRegionServer rs = cluster.getRegionServer(0);
request.setServer(ProtobufUtil.toServerName(rs.getServerName()));
HBaseProtos.ServerLoad sl = HBaseProtos.ServerLoad.newBuilder()
ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
.setTotalNumberOfRequests(10000)
.build();
master.getMetrics().getMetricsSource().init();