HBASE-8788 Edit of .proto files moving classes to better homes
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1495642 13f79535-47bb-0310-9956-ffa450edef68
parent e67e74c02e
commit 8781231132
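In outline: messages move out of the catch-all hbase.proto into purpose-named .proto files, and the generated Java outer classes move with them (HBaseProtos.Filter becomes FilterProtos.Filter, HBaseProtos.Cell and CellType become CellProtos.*, HBaseProtos.RegionLoad and ServerLoad become ClusterStatusProtos.*, and ZooKeeperProtos.RootRegionServer is renamed MetaRegionServer). A minimal sketch of the kind of split involved, using Filter.proto as the example; the message shape is inferred from the generated FilterProtos usage visible in this diff (proto.getName(), proto.getSerializedFilter()), and the options, field labels, and field numbers are assumptions, not copied from the repository:

    // Filter.proto -- sketch only, assumed layout
    option java_package = "org.apache.hadoop.hbase.protobuf.generated";
    option java_outer_classname = "FilterProtos";  // the generated class referenced throughout this commit

    message Filter {
      required string name = 1;             // assumed label/number: the filter's fully qualified class name
      optional bytes serializedFilter = 2;  // assumed label/number: filter-specific bytes handed to parseFrom
    }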
@@ -21,7 +21,7 @@
 package org.apache.hadoop.hbase;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;
 
@@ -31,9 +31,9 @@ import org.apache.hadoop.hbase.util.Strings;
 @InterfaceAudience.Private
 public class RegionLoad {
 
-  protected HBaseProtos.RegionLoad regionLoadPB;
+  protected ClusterStatusProtos.RegionLoad regionLoadPB;
 
-  public RegionLoad(HBaseProtos.RegionLoad regionLoadPB) {
+  public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) {
     this.regionLoadPB = regionLoadPB;
   }
 
@@ -203,4 +203,4 @@ public class RegionLoad {
       compactionProgressPct);
     return sb.toString();
   }
 }
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -53,9 +54,9 @@ public class ServerLoad {
   private long totalCompactingKVs = 0;
   private long currentCompactedKVs = 0;
 
-  public ServerLoad(HBaseProtos.ServerLoad serverLoad) {
+  public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
     this.serverLoad = serverLoad;
-    for (HBaseProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
+    for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
       stores += rl.getStores();
       storefiles += rl.getStorefiles();
       storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB();
@@ -76,11 +77,11 @@ public class ServerLoad {
   // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because
   // HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967).
   /* @return the underlying ServerLoad protobuf object */
-  public HBaseProtos.ServerLoad obtainServerLoadPB() {
+  public ClusterStatusProtos.ServerLoad obtainServerLoadPB() {
     return serverLoad;
   }
 
-  protected HBaseProtos.ServerLoad serverLoad;
+  protected ClusterStatusProtos.ServerLoad serverLoad;
 
   /* @return number of requests since last report. */
   public int getNumberOfRequests() {
@@ -200,7 +201,7 @@ public class ServerLoad {
   public Map<byte[], RegionLoad> getRegionsLoad() {
     Map<byte[], RegionLoad> regionLoads =
       new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
-    for (HBaseProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
+    for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
       RegionLoad regionLoad = new RegionLoad(rl);
       regionLoads.put(regionLoad.getName(), regionLoad);
     }
@@ -299,5 +300,5 @@ public class ServerLoad {
   }
 
   public static final ServerLoad EMPTY_SERVERLOAD =
-    new ServerLoad(HBaseProtos.ServerLoad.newBuilder().build());
+    new ServerLoad(ClusterStatusProtos.ServerLoad.newBuilder().build());
 }
@@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -339,8 +339,8 @@ public class ServerName implements Comparable<ServerName> {
     if (ProtobufUtil.isPBMagicPrefix(data)) {
       int prefixLen = ProtobufUtil.lengthOfPBMagic();
       try {
-        RootRegionServer rss =
-          RootRegionServer.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build();
+        MetaRegionServer rss =
+          MetaRegionServer.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build();
         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getServer();
         return new ServerName(sn.getHostName(), sn.getPort(), sn.getStartCode());
       } catch (InvalidProtocolBufferException e) {
@@ -25,13 +25,10 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 
 import com.google.protobuf.InvalidProtocolBufferException;
 
@@ -55,7 +52,6 @@ public class FilterList extends Filter {
     MUST_PASS_ONE
   }
 
-  private static final Configuration conf = HBaseConfiguration.create();
   private static final int MAX_LOG_FILTERS = 5;
   private Operator operator = Operator.MUST_PASS_ALL;
   private List<Filter> filters = new ArrayList<Filter>();
@@ -306,7 +302,7 @@ public class FilterList extends Filter {
 
     List<Filter> rowFilters = new ArrayList<Filter>(proto.getFiltersCount());
     try {
-      for (HBaseProtos.Filter filter : proto.getFiltersList()) {
+      for (FilterProtos.Filter filter : proto.getFiltersList()) {
        rowFilters.add(ProtobufUtil.toFilter(filter));
       }
     } catch (IOException ioe) {
@@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
+import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
@@ -97,10 +98,11 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Del
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
+import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
@@ -383,7 +385,7 @@ public final class ProtobufUtil {
       get.setTimeRange(minStamp, maxStamp);
     }
     if (proto.hasFilter()) {
-      HBaseProtos.Filter filter = proto.getFilter();
+      FilterProtos.Filter filter = proto.getFilter();
       get.setFilter(ProtobufUtil.toFilter(filter));
     }
     for (NameBytesPair attribute: proto.getAttributeList()) {
@@ -822,7 +824,7 @@ public final class ProtobufUtil {
       scan.setTimeRange(minStamp, maxStamp);
     }
     if (proto.hasFilter()) {
-      HBaseProtos.Filter filter = proto.getFilter();
+      FilterProtos.Filter filter = proto.getFilter();
       scan.setFilter(ProtobufUtil.toFilter(filter));
     }
     if (proto.hasBatchSize()) {
@@ -1058,9 +1060,9 @@ public final class ProtobufUtil {
    * @return the converted client Result
    */
   public static Result toResult(final ClientProtos.Result proto) {
-    List<HBaseProtos.Cell> values = proto.getCellList();
+    List<CellProtos.Cell> values = proto.getCellList();
     List<Cell> cells = new ArrayList<Cell>(values.size());
-    for (HBaseProtos.Cell c: values) {
+    for (CellProtos.Cell c: values) {
       cells.add(toCell(c));
     }
     return new Result(cells);
@@ -1086,9 +1088,9 @@ public final class ProtobufUtil {
         cells.add(scanner.current());
       }
     }
-    List<HBaseProtos.Cell> values = proto.getCellList();
+    List<CellProtos.Cell> values = proto.getCellList();
     if (cells == null) cells = new ArrayList<Cell>(values.size());
-    for (HBaseProtos.Cell c: values) {
+    for (CellProtos.Cell c: values) {
       cells.add(toCell(c));
     }
     return new Result(cells);
@@ -1139,7 +1141,7 @@ public final class ProtobufUtil {
    * @return the converted Filter
    */
   @SuppressWarnings("unchecked")
-  public static Filter toFilter(HBaseProtos.Filter proto) throws IOException {
+  public static Filter toFilter(FilterProtos.Filter proto) throws IOException {
     String type = proto.getName();
     final byte [] value = proto.getSerializedFilter().toByteArray();
     String funcName = "parseFrom";
@@ -1162,8 +1164,8 @@ public final class ProtobufUtil {
    * @param filter the Filter to convert
    * @return the converted protocol buffer Filter
    */
-  public static HBaseProtos.Filter toFilter(Filter filter) throws IOException {
-    HBaseProtos.Filter.Builder builder = HBaseProtos.Filter.newBuilder();
+  public static FilterProtos.Filter toFilter(Filter filter) throws IOException {
+    FilterProtos.Filter.Builder builder = FilterProtos.Filter.newBuilder();
     builder.setName(filter.getClass().getName());
     builder.setSerializedFilter(ByteString.copyFrom(filter.toByteArray()));
     return builder.build();
@@ -1960,23 +1962,23 @@ public final class ProtobufUtil {
       throw new IOException(se);
   }
 
-  public static HBaseProtos.Cell toCell(final Cell kv) {
+  public static CellProtos.Cell toCell(final Cell kv) {
     // Doing this is going to kill us if we do it for all data passed.
     // St.Ack 20121205
-    HBaseProtos.Cell.Builder kvbuilder = HBaseProtos.Cell.newBuilder();
+    CellProtos.Cell.Builder kvbuilder = CellProtos.Cell.newBuilder();
     kvbuilder.setRow(ByteString.copyFrom(kv.getRowArray(), kv.getRowOffset(),
       kv.getRowLength()));
     kvbuilder.setFamily(ByteString.copyFrom(kv.getFamilyArray(),
       kv.getFamilyOffset(), kv.getFamilyLength()));
     kvbuilder.setQualifier(ByteString.copyFrom(kv.getQualifierArray(),
       kv.getQualifierOffset(), kv.getQualifierLength()));
-    kvbuilder.setCellType(HBaseProtos.CellType.valueOf(kv.getTypeByte()));
+    kvbuilder.setCellType(CellProtos.CellType.valueOf(kv.getTypeByte()));
     kvbuilder.setTimestamp(kv.getTimestamp());
     kvbuilder.setValue(ByteString.copyFrom(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
     return kvbuilder.build();
   }
 
-  public static Cell toCell(final HBaseProtos.Cell cell) {
+  public static Cell toCell(final CellProtos.Cell cell) {
     // Doing this is going to kill us if we do it for all data passed.
     // St.Ack 20121205
     return CellUtil.createCell(cell.getRow().toByteArray(),
@@ -141,8 +141,8 @@ public class MetaRegionTracker extends ZooKeeperNodeTracker {
     HBaseProtos.ServerName pbsn =
       HBaseProtos.ServerName.newBuilder().setHostName(sn.getHostname()).
       setPort(sn.getPort()).setStartCode(sn.getStartcode()).build();
-    ZooKeeperProtos.RootRegionServer pbrsr =
-      ZooKeeperProtos.RootRegionServer.newBuilder().setServer(pbsn).build();
+    ZooKeeperProtos.MetaRegionServer pbrsr =
+      ZooKeeperProtos.MetaRegionServer.newBuilder().setServer(pbsn).build();
     return ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
   }
 
File diff suppressed because it is too large
@@ -530,8 +530,8 @@ public final class ClientProtos {
 
     // optional .Filter filter = 4;
     boolean hasFilter();
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter();
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder();
+    org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter();
+    org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder();
 
     // optional .TimeRange timeRange = 5;
     boolean hasTimeRange();
@@ -637,14 +637,14 @@ public final class ClientProtos {
 
     // optional .Filter filter = 4;
     public static final int FILTER_FIELD_NUMBER = 4;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter filter_;
+    private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_;
     public boolean hasFilter() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter() {
+    public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
       return filter_;
     }
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder() {
+    public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
       return filter_;
     }
 
@@ -705,7 +705,7 @@ public final class ClientProtos {
       row_ = com.google.protobuf.ByteString.EMPTY;
       column_ = java.util.Collections.emptyList();
       attribute_ = java.util.Collections.emptyList();
-      filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
+      filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
       timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
       maxVersions_ = 1;
       cacheBlocks_ = true;
@@ -1060,7 +1060,7 @@ public final class ClientProtos {
          attributeBuilder_.clear();
        }
        if (filterBuilder_ == null) {
-         filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
+         filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
        } else {
          filterBuilder_.clear();
        }
@@ -1331,7 +1331,7 @@ public final class ClientProtos {
              break;
            }
            case 34: {
-             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.newBuilder();
+             org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.newBuilder();
              if (hasFilter()) {
                subBuilder.mergeFrom(getFilter());
              }
@@ -1771,20 +1771,20 @@ public final class ClientProtos {
      }
 
      // optional .Filter filter = 4;
-     private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
+     private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder> filterBuilder_;
+         org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder> filterBuilder_;
      public boolean hasFilter() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter() {
+     public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
        if (filterBuilder_ == null) {
          return filter_;
        } else {
          return filterBuilder_.getMessage();
        }
      }
-     public Builder setFilter(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter value) {
+     public Builder setFilter(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter value) {
        if (filterBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
@@ -1798,7 +1798,7 @@ public final class ClientProtos {
        return this;
      }
      public Builder setFilter(
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder builderForValue) {
+         org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder builderForValue) {
        if (filterBuilder_ == null) {
          filter_ = builderForValue.build();
          onChanged();
@@ -1808,12 +1808,12 @@ public final class ClientProtos {
        bitField0_ |= 0x00000008;
        return this;
      }
-     public Builder mergeFilter(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter value) {
+     public Builder mergeFilter(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter value) {
        if (filterBuilder_ == null) {
          if (((bitField0_ & 0x00000008) == 0x00000008) &&
-             filter_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance()) {
+             filter_ != org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance()) {
            filter_ =
-             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.newBuilder(filter_).mergeFrom(value).buildPartial();
+             org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.newBuilder(filter_).mergeFrom(value).buildPartial();
          } else {
            filter_ = value;
          }
@@ -1826,7 +1826,7 @@ public final class ClientProtos {
      }
      public Builder clearFilter() {
        if (filterBuilder_ == null) {
-         filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
+         filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
          onChanged();
        } else {
          filterBuilder_.clear();
@@ -1834,12 +1834,12 @@ public final class ClientProtos {
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder getFilterBuilder() {
+     public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder getFilterBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getFilterFieldBuilder().getBuilder();
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder() {
+     public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
        if (filterBuilder_ != null) {
          return filterBuilder_.getMessageOrBuilder();
        } else {
@@ -1847,11 +1847,11 @@ public final class ClientProtos {
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder>
+         org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder>
          getFilterFieldBuilder() {
        if (filterBuilder_ == null) {
          filterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder>(
+             org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder>(
                  filter_,
                  getParentForChildren(),
                  isClean());
@@ -2049,13 +2049,13 @@ public final class ClientProtos {
       extends com.google.protobuf.MessageOrBuilder {
 
     // repeated .Cell cell = 1;
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell>
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell>
         getCellList();
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell getCell(int index);
+    org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell getCell(int index);
     int getCellCount();
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>
         getCellOrBuilderList();
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder getCellOrBuilder(
+    org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder getCellOrBuilder(
         int index);
 
     // optional int32 associatedCellCount = 2;
@@ -2093,21 +2093,21 @@ public final class ClientProtos {
     private int bitField0_;
     // repeated .Cell cell = 1;
     public static final int CELL_FIELD_NUMBER = 1;
-    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> cell_;
-    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> getCellList() {
+    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> cell_;
+    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> getCellList() {
       return cell_;
     }
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>
+    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>
         getCellOrBuilderList() {
       return cell_;
     }
     public int getCellCount() {
       return cell_.size();
     }
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell getCell(int index) {
+    public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell getCell(int index) {
      return cell_.get(index);
    }
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder getCellOrBuilder(
+    public org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder getCellOrBuilder(
        int index) {
      return cell_.get(index);
    }
@@ -2461,7 +2461,7 @@ public final class ClientProtos {
            break;
          }
          case 10: {
-           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.newBuilder();
+           org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.newBuilder();
            input.readMessage(subBuilder, extensionRegistry);
            addCell(subBuilder.buildPartial());
            break;
@@ -2478,19 +2478,19 @@ public final class ClientProtos {
      private int bitField0_;
 
      // repeated .Cell cell = 1;
-     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> cell_ =
+     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> cell_ =
        java.util.Collections.emptyList();
      private void ensureCellIsMutable() {
        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
-         cell_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell>(cell_);
+         cell_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell>(cell_);
          bitField0_ |= 0x00000001;
        }
      }
 
      private com.google.protobuf.RepeatedFieldBuilder<
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder> cellBuilder_;
+         org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder> cellBuilder_;
 
-     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> getCellList() {
+     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> getCellList() {
        if (cellBuilder_ == null) {
          return java.util.Collections.unmodifiableList(cell_);
        } else {
@@ -2504,7 +2504,7 @@ public final class ClientProtos {
          return cellBuilder_.getCount();
        }
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell getCell(int index) {
+     public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell getCell(int index) {
        if (cellBuilder_ == null) {
          return cell_.get(index);
        } else {
@@ -2512,7 +2512,7 @@ public final class ClientProtos {
        }
      }
      public Builder setCell(
-         int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell value) {
+         int index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell value) {
        if (cellBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
@@ -2526,7 +2526,7 @@ public final class ClientProtos {
        return this;
      }
      public Builder setCell(
-         int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder builderForValue) {
+         int index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) {
        if (cellBuilder_ == null) {
          ensureCellIsMutable();
          cell_.set(index, builderForValue.build());
@@ -2536,7 +2536,7 @@ public final class ClientProtos {
        }
        return this;
      }
-     public Builder addCell(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell value) {
+     public Builder addCell(org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell value) {
        if (cellBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
@@ -2550,7 +2550,7 @@ public final class ClientProtos {
        return this;
      }
      public Builder addCell(
-         int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell value) {
+         int index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell value) {
        if (cellBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
@@ -2564,7 +2564,7 @@ public final class ClientProtos {
        return this;
      }
      public Builder addCell(
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder builderForValue) {
+         org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) {
        if (cellBuilder_ == null) {
          ensureCellIsMutable();
          cell_.add(builderForValue.build());
@@ -2575,7 +2575,7 @@ public final class ClientProtos {
        return this;
      }
      public Builder addCell(
-         int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder builderForValue) {
+         int index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder builderForValue) {
        if (cellBuilder_ == null) {
          ensureCellIsMutable();
          cell_.add(index, builderForValue.build());
@@ -2586,7 +2586,7 @@ public final class ClientProtos {
        return this;
      }
      public Builder addAllCell(
-         java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell> values) {
+         java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> values) {
        if (cellBuilder_ == null) {
          ensureCellIsMutable();
          super.addAll(values, cell_);
@@ -2616,18 +2616,18 @@ public final class ClientProtos {
        }
        return this;
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder getCellBuilder(
+     public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder getCellBuilder(
          int index) {
        return getCellFieldBuilder().getBuilder(index);
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder getCellOrBuilder(
+     public org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder getCellOrBuilder(
          int index) {
        if (cellBuilder_ == null) {
          return cell_.get(index); } else {
          return cellBuilder_.getMessageOrBuilder(index);
        }
      }
-     public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>
+     public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>
          getCellOrBuilderList() {
        if (cellBuilder_ != null) {
          return cellBuilder_.getMessageOrBuilderList();
@@ -2635,25 +2635,25 @@ public final class ClientProtos {
          return java.util.Collections.unmodifiableList(cell_);
        }
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder addCellBuilder() {
+     public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder addCellBuilder() {
        return getCellFieldBuilder().addBuilder(
-           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.getDefaultInstance());
+           org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.getDefaultInstance());
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder addCellBuilder(
+     public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder addCellBuilder(
         int index) {
       return getCellFieldBuilder().addBuilder(
-           index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.getDefaultInstance());
+           index, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.getDefaultInstance());
      }
-     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder>
+     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder>
          getCellBuilderList() {
        return getCellFieldBuilder().getBuilderList();
      }
      private com.google.protobuf.RepeatedFieldBuilder<
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>
+         org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>
          getCellFieldBuilder() {
        if (cellBuilder_ == null) {
          cellBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CellOrBuilder>(
+             org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder>(
                  cell_,
                  ((bitField0_ & 0x00000001) == 0x00000001),
                  getParentForChildren(),
@@ -10587,8 +10587,8 @@ public final class ClientProtos {
 
     // optional .Filter filter = 5;
     boolean hasFilter();
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter();
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder();
+    org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter();
+    org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder();
 
     // optional .TimeRange timeRange = 6;
     boolean hasTimeRange();
@@ -10724,14 +10724,14 @@ public final class ClientProtos {
 
     // optional .Filter filter = 5;
     public static final int FILTER_FIELD_NUMBER = 5;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter filter_;
+    private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_;
     public boolean hasFilter() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
     }
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter() {
+    public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
      return filter_;
     }
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder() {
+    public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
      return filter_;
     }
 
@@ -10843,7 +10843,7 @@ public final class ClientProtos {
      attribute_ = java.util.Collections.emptyList();
      startRow_ = com.google.protobuf.ByteString.EMPTY;
      stopRow_ = com.google.protobuf.ByteString.EMPTY;
-     filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
+     filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
      timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
      maxVersions_ = 1;
      cacheBlocks_ = true;
@@ -11297,7 +11297,7 @@ public final class ClientProtos {
       stopRow_ = com.google.protobuf.ByteString.EMPTY;
       bitField0_ = (bitField0_ & ~0x00000008);
       if (filterBuilder_ == null) {
-        filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
+        filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
       } else {
         filterBuilder_.clear();
       }
@@ -11621,7 +11621,7 @@ public final class ClientProtos {
             break;
           }
           case 42: {
-            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.newBuilder();
+            org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.newBuilder();
             if (hasFilter()) {
               subBuilder.mergeFrom(getFilter());
             }
@@ -12110,20 +12110,20 @@ public final class ClientProtos {
      }
 
      // optional .Filter filter = 5;
-     private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
+     private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder> filterBuilder_;
+         org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder> filterBuilder_;
      public boolean hasFilter() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter getFilter() {
+     public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
        if (filterBuilder_ == null) {
          return filter_;
        } else {
          return filterBuilder_.getMessage();
        }
      }
-     public Builder setFilter(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter value) {
+     public Builder setFilter(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter value) {
        if (filterBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
@@ -12137,7 +12137,7 @@ public final class ClientProtos {
        return this;
      }
      public Builder setFilter(
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder builderForValue) {
+         org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder builderForValue) {
        if (filterBuilder_ == null) {
          filter_ = builderForValue.build();
          onChanged();
@@ -12147,12 +12147,12 @@ public final class ClientProtos {
        bitField0_ |= 0x00000010;
        return this;
      }
-     public Builder mergeFilter(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter value) {
+     public Builder mergeFilter(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter value) {
        if (filterBuilder_ == null) {
          if (((bitField0_ & 0x00000010) == 0x00000010) &&
-             filter_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance()) {
+             filter_ != org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance()) {
            filter_ =
-             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.newBuilder(filter_).mergeFrom(value).buildPartial();
+             org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.newBuilder(filter_).mergeFrom(value).buildPartial();
          } else {
            filter_ = value;
          }
@@ -12165,7 +12165,7 @@ public final class ClientProtos {
      }
      public Builder clearFilter() {
        if (filterBuilder_ == null) {
-         filter_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.getDefaultInstance();
+         filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
          onChanged();
        } else {
          filterBuilder_.clear();
@@ -12173,12 +12173,12 @@ public final class ClientProtos {
        bitField0_ = (bitField0_ & ~0x00000010);
        return this;
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder getFilterBuilder() {
+     public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder getFilterBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getFilterFieldBuilder().getBuilder();
      }
-     public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder getFilterOrBuilder() {
+     public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
        if (filterBuilder_ != null) {
          return filterBuilder_.getMessageOrBuilder();
        } else {
@@ -12186,11 +12186,11 @@ public final class ClientProtos {
        }
      }
      private com.google.protobuf.SingleFieldBuilder<
-         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder>
+         org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder>
          getFilterFieldBuilder() {
        if (filterBuilder_ == null) {
          filterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-             org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FilterOrBuilder>(
+             org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder>(
                  filter_,
                  getParentForChildren(),
                  isClean());
@@ -21554,99 +21554,100 @@ public final class ClientProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\014Client.proto\032\013hbase.proto\032\020Comparator." +
-      "proto\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tquali" +
-      "fier\030\002 \003(\014\"\342\001\n\003Get\022\013\n\003row\030\001 \002(\014\022\027\n\006colum" +
-      "n\030\002 \003(\0132\007.Column\022!\n\tattribute\030\003 \003(\0132\016.Na" +
-      "meBytesPair\022\027\n\006filter\030\004 \001(\0132\007.Filter\022\035\n\t" +
-      "timeRange\030\005 \001(\0132\n.TimeRange\022\026\n\013maxVersio" +
-      "ns\030\006 \001(\r:\0011\022\031\n\013cacheBlocks\030\007 \001(\010:\004true\022\022" +
-      "\n\nstoreLimit\030\010 \001(\r\022\023\n\013storeOffset\030\t \001(\r\"" +
-      ":\n\006Result\022\023\n\004cell\030\001 \003(\0132\005.Cell\022\033\n\023associ" +
-      "atedCellCount\030\002 \001(\005\"r\n\nGetRequest\022 \n\006reg",
-      "ion\030\001 \002(\0132\020.RegionSpecifier\022\021\n\003get\030\002 \002(\013" +
-      "2\004.Get\022\030\n\020closestRowBefore\030\003 \001(\010\022\025\n\rexis" +
-      "tenceOnly\030\004 \001(\010\"w\n\017MultiGetRequest\022 \n\006re" +
-      "gion\030\001 \002(\0132\020.RegionSpecifier\022\021\n\003get\030\002 \003(" +
-      "\0132\004.Get\022\030\n\020closestRowBefore\030\003 \001(\010\022\025\n\rexi" +
-      "stenceOnly\030\004 \001(\010\"6\n\013GetResponse\022\027\n\006resul" +
-      "t\030\001 \001(\0132\007.Result\022\016\n\006exists\030\002 \001(\010\";\n\020Mult" +
-      "iGetResponse\022\027\n\006result\030\001 \003(\0132\007.Result\022\016\n" +
-      "\006exists\030\002 \003(\010\"\177\n\tCondition\022\013\n\003row\030\001 \002(\014\022" +
-      "\016\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022!\n\013co",
-      "mpareType\030\004 \002(\0162\014.CompareType\022\037\n\ncompara" +
-      "tor\030\005 \002(\0132\013.Comparator\"\365\005\n\rMutationProto" +
-      "\022\013\n\003row\030\001 \001(\014\022/\n\nmutateType\030\002 \001(\0162\033.Muta" +
-      "tionProto.MutationType\022/\n\013columnValue\030\003 " +
-      "\003(\0132\032.MutationProto.ColumnValue\022\021\n\ttimes" +
+      "\n\014Client.proto\032\013hbase.proto\032\014Filter.prot" +
+      "o\032\nCell.proto\032\020Comparator.proto\"+\n\006Colum" +
+      "n\022\016\n\006family\030\001 \002(\014\022\021\n\tqualifier\030\002 \003(\014\"\342\001\n" +
+      "\003Get\022\013\n\003row\030\001 \002(\014\022\027\n\006column\030\002 \003(\0132\007.Colu" +
+      "mn\022!\n\tattribute\030\003 \003(\0132\016.NameBytesPair\022\027\n" +
+      "\006filter\030\004 \001(\0132\007.Filter\022\035\n\ttimeRange\030\005 \001(" +
+      "\0132\n.TimeRange\022\026\n\013maxVersions\030\006 \001(\r:\0011\022\031\n" +
+      "\013cacheBlocks\030\007 \001(\010:\004true\022\022\n\nstoreLimit\030\010" +
+      " \001(\r\022\023\n\013storeOffset\030\t \001(\r\":\n\006Result\022\023\n\004c" +
+      "ell\030\001 \003(\0132\005.Cell\022\033\n\023associatedCellCount\030",
+      "\002 \001(\005\"r\n\nGetRequest\022 \n\006region\030\001 \002(\0132\020.Re" +
+      "gionSpecifier\022\021\n\003get\030\002 \002(\0132\004.Get\022\030\n\020clos" +
+      "estRowBefore\030\003 \001(\010\022\025\n\rexistenceOnly\030\004 \001(" +
+      "\010\"w\n\017MultiGetRequest\022 \n\006region\030\001 \002(\0132\020.R" +
+      "egionSpecifier\022\021\n\003get\030\002 \003(\0132\004.Get\022\030\n\020clo" +
+      "sestRowBefore\030\003 \001(\010\022\025\n\rexistenceOnly\030\004 \001" +
+      "(\010\"6\n\013GetResponse\022\027\n\006result\030\001 \001(\0132\007.Resu" +
+      "lt\022\016\n\006exists\030\002 \001(\010\";\n\020MultiGetResponse\022\027" +
+      "\n\006result\030\001 \003(\0132\007.Result\022\016\n\006exists\030\002 \003(\010\"" +
+      "\177\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(",
+      "\014\022\021\n\tqualifier\030\003 \002(\014\022!\n\013compareType\030\004 \002(" +
+      "\0162\014.CompareType\022\037\n\ncomparator\030\005 \002(\0132\013.Co" +
+      "mparator\"\365\005\n\rMutationProto\022\013\n\003row\030\001 \001(\014\022" +
+      "/\n\nmutateType\030\002 \001(\0162\033.MutationProto.Muta" +
|
"tionType\022/\n\013columnValue\030\003 \003(\0132\032.Mutation" +
|
||||||
"tamp\030\004 \001(\004\022!\n\tattribute\030\005 \003(\0132\016.NameByte" +
|
"Proto.ColumnValue\022\021\n\ttimestamp\030\004 \001(\004\022!\n\t" +
|
||||||
"sPair\022:\n\ndurability\030\006 \001(\0162\031.MutationProt" +
|
"attribute\030\005 \003(\0132\016.NameBytesPair\022:\n\ndurab" +
|
||||||
"o.Durability:\013USE_DEFAULT\022\035\n\ttimeRange\030\007" +
|
"ility\030\006 \001(\0162\031.MutationProto.Durability:\013" +
|
||||||
" \001(\0132\n.TimeRange\022\033\n\023associatedCellCount\030" +
|
"USE_DEFAULT\022\035\n\ttimeRange\030\007 \001(\0132\n.TimeRan" +
|
||||||
"\010 \001(\005\032\326\001\n\013ColumnValue\022\016\n\006family\030\001 \002(\014\022A\n",
|
"ge\022\033\n\023associatedCellCount\030\010 \001(\005\032\326\001\n\013Colu",
|
||||||
"\016qualifierValue\030\002 \003(\0132).MutationProto.Co" +
|
"mnValue\022\016\n\006family\030\001 \002(\014\022A\n\016qualifierValu" +
|
||||||
"lumnValue.QualifierValue\032t\n\016QualifierVal" +
|
"e\030\002 \003(\0132).MutationProto.ColumnValue.Qual" +
|
||||||
"ue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\t" +
|
"ifierValue\032t\n\016QualifierValue\022\021\n\tqualifie" +
|
||||||
"timestamp\030\003 \001(\004\022-\n\ndeleteType\030\004 \001(\0162\031.Mu" +
|
"r\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(" +
|
||||||
"tationProto.DeleteType\"W\n\nDurability\022\017\n\013" +
|
"\004\022-\n\ndeleteType\030\004 \001(\0162\031.MutationProto.De" +
|
||||||
"USE_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL" +
|
"leteType\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022" +
|
||||||
"\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014Mutat" +
|
"\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL" +
|
||||||
"ionType\022\n\n\006APPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PU" +
|
"\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006APP" +
|
||||||
"T\020\002\022\n\n\006DELETE\020\003\"U\n\nDeleteType\022\026\n\022DELETE_" +
|
"END\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020" +
|
||||||
"ONE_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERSION",
|
"\003\"U\n\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000\022",
|
||||||
"S\020\001\022\021\n\rDELETE_FAMILY\020\002\"r\n\rMutateRequest\022" +
|
"\034\n\030DELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_" +
|
||||||
" \n\006region\030\001 \002(\0132\020.RegionSpecifier\022 \n\010mut" +
|
"FAMILY\020\002\"r\n\rMutateRequest\022 \n\006region\030\001 \002(" +
|
||||||
"ation\030\002 \002(\0132\016.MutationProto\022\035\n\tcondition" +
|
"\0132\020.RegionSpecifier\022 \n\010mutation\030\002 \002(\0132\016." +
|
||||||
"\030\003 \001(\0132\n.Condition\"<\n\016MutateResponse\022\027\n\006" +
|
"MutationProto\022\035\n\tcondition\030\003 \001(\0132\n.Condi" +
|
||||||
"result\030\001 \001(\0132\007.Result\022\021\n\tprocessed\030\002 \001(\010" +
|
"tion\"<\n\016MutateResponse\022\027\n\006result\030\001 \001(\0132\007" +
|
||||||
"\"\362\002\n\004Scan\022\027\n\006column\030\001 \003(\0132\007.Column\022!\n\tat" +
|
".Result\022\021\n\tprocessed\030\002 \001(\010\"\362\002\n\004Scan\022\027\n\006c" +
|
||||||
"tribute\030\002 \003(\0132\016.NameBytesPair\022\020\n\010startRo" +
|
"olumn\030\001 \003(\0132\007.Column\022!\n\tattribute\030\002 \003(\0132" +
|
||||||
"w\030\003 \001(\014\022\017\n\007stopRow\030\004 \001(\014\022\027\n\006filter\030\005 \001(\013" +
|
"\016.NameBytesPair\022\020\n\010startRow\030\003 \001(\014\022\017\n\007sto" +
|
||||||
"2\007.Filter\022\035\n\ttimeRange\030\006 \001(\0132\n.TimeRange" +
|
"pRow\030\004 \001(\014\022\027\n\006filter\030\005 \001(\0132\007.Filter\022\035\n\tt" +
|
||||||
"\022\026\n\013maxVersions\030\007 \001(\r:\0011\022\031\n\013cacheBlocks\030",
|
"imeRange\030\006 \001(\0132\n.TimeRange\022\026\n\013maxVersion",
|
||||||
"\010 \001(\010:\004true\022\021\n\tbatchSize\030\t \001(\r\022\025\n\rmaxRes" +
|
"s\030\007 \001(\r:\0011\022\031\n\013cacheBlocks\030\010 \001(\010:\004true\022\021\n" +
|
||||||
"ultSize\030\n \001(\004\022\022\n\nstoreLimit\030\013 \001(\r\022\023\n\013sto" +
|
"\tbatchSize\030\t \001(\r\022\025\n\rmaxResultSize\030\n \001(\004\022" +
|
||||||
"reOffset\030\014 \001(\r\022\"\n\032loadColumnFamiliesOnDe" +
|
"\022\n\nstoreLimit\030\013 \001(\r\022\023\n\013storeOffset\030\014 \001(\r" +
|
||||||
"mand\030\r \001(\010\022\024\n\014cachingCount\030\016 \001(\r\022\023\n\013pref" +
|
"\022\"\n\032loadColumnFamiliesOnDemand\030\r \001(\010\022\024\n\014" +
|
||||||
"etching\030\017 \001(\010\"\230\001\n\013ScanRequest\022 \n\006region\030" +
|
"cachingCount\030\016 \001(\r\022\023\n\013prefetching\030\017 \001(\010\"" +
|
||||||
"\001 \001(\0132\020.RegionSpecifier\022\023\n\004scan\030\002 \001(\0132\005." +
|
"\230\001\n\013ScanRequest\022 \n\006region\030\001 \001(\0132\020.Region" +
|
||||||
"Scan\022\021\n\tscannerId\030\003 \001(\004\022\024\n\014numberOfRows\030" +
|
"Specifier\022\023\n\004scan\030\002 \001(\0132\005.Scan\022\021\n\tscanne" +
|
||||||
"\004 \001(\r\022\024\n\014closeScanner\030\005 \001(\010\022\023\n\013nextCallS" +
|
"rId\030\003 \001(\004\022\024\n\014numberOfRows\030\004 \001(\r\022\024\n\014close" +
|
||||||
"eq\030\006 \001(\004\"l\n\014ScanResponse\022\'\n\016resultCellMe" +
|
"Scanner\030\005 \001(\010\022\023\n\013nextCallSeq\030\006 \001(\004\"l\n\014Sc" +
|
||||||
"ta\030\001 \001(\0132\017.ResultCellMeta\022\021\n\tscannerId\030\002",
|
"anResponse\022\'\n\016resultCellMeta\030\001 \001(\0132\017.Res",
|
||||||
" \001(\004\022\023\n\013moreResults\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\"%" +
|
"ultCellMeta\022\021\n\tscannerId\030\002 \001(\004\022\023\n\013moreRe" +
|
||||||
"\n\016ResultCellMeta\022\023\n\013cellsLength\030\001 \003(\r\"\260\001" +
|
"sults\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\"%\n\016ResultCellMe" +
|
||||||
"\n\024BulkLoadHFileRequest\022 \n\006region\030\001 \002(\0132\020" +
|
"ta\022\023\n\013cellsLength\030\001 \003(\r\"\260\001\n\024BulkLoadHFil" +
|
||||||
".RegionSpecifier\0224\n\nfamilyPath\030\002 \003(\0132 .B" +
|
"eRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" +
|
||||||
"ulkLoadHFileRequest.FamilyPath\022\024\n\014assign" +
|
"er\0224\n\nfamilyPath\030\002 \003(\0132 .BulkLoadHFileRe" +
|
||||||
"SeqNum\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006family\030\001 \002" +
|
"quest.FamilyPath\022\024\n\014assignSeqNum\030\003 \001(\010\032*" +
|
||||||
"(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileRespons" +
|
"\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002" +
|
||||||
"e\022\016\n\006loaded\030\001 \002(\010\"_\n\026CoprocessorServiceC" +
|
"(\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded\030\001 " +
|
||||||
"all\022\013\n\003row\030\001 \002(\014\022\023\n\013serviceName\030\002 \002(\t\022\022\n" +
|
"\002(\010\"_\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002" +
|
||||||
"\nmethodName\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031Co",
|
"(\014\022\023\n\013serviceName\030\002 \002(\t\022\022\n\nmethodName\030\003 ",
|
||||||
"processorServiceRequest\022 \n\006region\030\001 \002(\0132" +
|
"\002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031CoprocessorServi" +
|
||||||
"\020.RegionSpecifier\022%\n\004call\030\002 \002(\0132\027.Coproc" +
|
"ceRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif" +
|
||||||
"essorServiceCall\"]\n\032CoprocessorServiceRe" +
|
"ier\022%\n\004call\030\002 \002(\0132\027.CoprocessorServiceCa" +
|
||||||
"sponse\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" +
|
"ll\"]\n\032CoprocessorServiceResponse\022 \n\006regi" +
|
||||||
"\022\035\n\005value\030\002 \002(\0132\016.NameBytesPair\"B\n\013Multi" +
|
"on\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002(" +
|
||||||
"Action\022 \n\010mutation\030\001 \001(\0132\016.MutationProto" +
|
"\0132\016.NameBytesPair\"B\n\013MultiAction\022 \n\010muta" +
|
||||||
"\022\021\n\003get\030\002 \001(\0132\004.Get\"I\n\014ActionResult\022\026\n\005v" +
|
"tion\030\001 \001(\0132\016.MutationProto\022\021\n\003get\030\002 \001(\0132" +
|
||||||
"alue\030\001 \001(\0132\007.Result\022!\n\texception\030\002 \001(\0132\016" +
|
"\004.Get\"I\n\014ActionResult\022\026\n\005value\030\001 \001(\0132\007.R" +
|
||||||
".NameBytesPair\"^\n\014MultiRequest\022 \n\006region" +
|
"esult\022!\n\texception\030\002 \001(\0132\016.NameBytesPair" +
|
||||||
"\030\001 \002(\0132\020.RegionSpecifier\022\034\n\006action\030\002 \003(\013",
|
"\"^\n\014MultiRequest\022 \n\006region\030\001 \002(\0132\020.Regio",
|
||||||
"2\014.MultiAction\022\016\n\006atomic\030\003 \001(\010\".\n\rMultiR" +
|
"nSpecifier\022\034\n\006action\030\002 \003(\0132\014.MultiAction" +
|
||||||
"esponse\022\035\n\006result\030\001 \003(\0132\r.ActionResult2\342" +
|
"\022\016\n\006atomic\030\003 \001(\010\".\n\rMultiResponse\022\035\n\006res" +
|
||||||
"\002\n\rClientService\022 \n\003get\022\013.GetRequest\032\014.G" +
|
"ult\030\001 \003(\0132\r.ActionResult2\342\002\n\rClientServi" +
|
||||||
"etResponse\022/\n\010multiGet\022\020.MultiGetRequest" +
|
"ce\022 \n\003get\022\013.GetRequest\032\014.GetResponse\022/\n\010" +
|
||||||
"\032\021.MultiGetResponse\022)\n\006mutate\022\016.MutateRe" +
|
"multiGet\022\020.MultiGetRequest\032\021.MultiGetRes" +
|
||||||
"quest\032\017.MutateResponse\022#\n\004scan\022\014.ScanReq" +
|
"ponse\022)\n\006mutate\022\016.MutateRequest\032\017.Mutate" +
|
||||||
"uest\032\r.ScanResponse\022>\n\rbulkLoadHFile\022\025.B" +
|
"Response\022#\n\004scan\022\014.ScanRequest\032\r.ScanRes" +
|
||||||
"ulkLoadHFileRequest\032\026.BulkLoadHFileRespo" +
|
"ponse\022>\n\rbulkLoadHFile\022\025.BulkLoadHFileRe" +
|
||||||
"nse\022F\n\013execService\022\032.CoprocessorServiceR" +
|
"quest\032\026.BulkLoadHFileResponse\022F\n\013execSer" +
|
||||||
"equest\032\033.CoprocessorServiceResponse\022&\n\005m",
|
"vice\022\032.CoprocessorServiceRequest\032\033.Copro",
|
||||||
"ulti\022\r.MultiRequest\032\016.MultiResponseBB\n*o" +
|
"cessorServiceResponse\022&\n\005multi\022\r.MultiRe" +
|
||||||
"rg.apache.hadoop.hbase.protobuf.generate" +
|
"quest\032\016.MultiResponseBB\n*org.apache.hado" +
|
||||||
"dB\014ClientProtosH\001\210\001\001\240\001\001"
|
"op.hbase.protobuf.generatedB\014ClientProto" +
|
||||||
|
"sH\001\210\001\001\240\001\001"
|
||||||
};
|
};
|
||||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||||
|
@ -21876,6 +21877,8 @@ public final class ClientProtos {
|
||||||
.internalBuildGeneratedFileFrom(descriptorData,
|
.internalBuildGeneratedFileFrom(descriptorData,
|
||||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
|
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
|
||||||
|
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.getDescriptor(),
|
||||||
|
org.apache.hadoop.hbase.protobuf.generated.CellProtos.getDescriptor(),
|
||||||
org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.getDescriptor(),
|
org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.getDescriptor(),
|
||||||
}, assigner);
|
}, assigner);
|
||||||
}
|
}
|
||||||
|
|
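The descriptor block above now declares Filter.proto and Cell.proto as dependencies of Client.proto. A quick runtime check with the standard protobuf descriptor API (a sketch; the order follows the descriptor string above) would list all four imports:

import com.google.protobuf.Descriptors;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

// Prints: hbase.proto, Filter.proto, Cell.proto, Comparator.proto
for (Descriptors.FileDescriptor dep : ClientProtos.getDescriptor().getDependencies()) {
  System.out.println(dep.getName());
}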
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -1138,8 +1138,8 @@ public final class RegionServerStatusProtos {
|
||||||
|
|
||||||
// optional .ServerLoad load = 2;
|
// optional .ServerLoad load = 2;
|
||||||
boolean hasLoad();
|
boolean hasLoad();
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad();
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad();
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder();
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder();
|
||||||
}
|
}
|
||||||
public static final class RegionServerReportRequest extends
|
public static final class RegionServerReportRequest extends
|
||||||
com.google.protobuf.GeneratedMessage
|
com.google.protobuf.GeneratedMessage
|
||||||
|
@ -1185,20 +1185,20 @@ public final class RegionServerStatusProtos {
|
||||||
|
|
||||||
// optional .ServerLoad load = 2;
|
// optional .ServerLoad load = 2;
|
||||||
public static final int LOAD_FIELD_NUMBER = 2;
|
public static final int LOAD_FIELD_NUMBER = 2;
|
||||||
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad load_;
|
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad load_;
|
||||||
public boolean hasLoad() {
|
public boolean hasLoad() {
|
||||||
return ((bitField0_ & 0x00000002) == 0x00000002);
|
return ((bitField0_ & 0x00000002) == 0x00000002);
|
||||||
}
|
}
|
||||||
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad() {
|
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad() {
|
||||||
return load_;
|
return load_;
|
||||||
}
|
}
|
||||||
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder() {
|
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder() {
|
||||||
return load_;
|
return load_;
|
||||||
}
|
}
|
||||||
|
|
||||||
private void initFields() {
|
private void initFields() {
|
||||||
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
|
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
|
||||||
load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
|
load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
|
||||||
}
|
}
|
||||||
private byte memoizedIsInitialized = -1;
|
private byte memoizedIsInitialized = -1;
|
||||||
public final boolean isInitialized() {
|
public final boolean isInitialized() {
|
||||||
|
@ -1424,7 +1424,7 @@ public final class RegionServerStatusProtos {
|
||||||
}
|
}
|
||||||
bitField0_ = (bitField0_ & ~0x00000001);
|
bitField0_ = (bitField0_ & ~0x00000001);
|
||||||
if (loadBuilder_ == null) {
|
if (loadBuilder_ == null) {
|
||||||
load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
|
load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
|
||||||
} else {
|
} else {
|
||||||
loadBuilder_.clear();
|
loadBuilder_.clear();
|
||||||
}
|
}
|
||||||
|
@ -1560,7 +1560,7 @@ public final class RegionServerStatusProtos {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case 18: {
|
case 18: {
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder();
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder();
|
||||||
if (hasLoad()) {
|
if (hasLoad()) {
|
||||||
subBuilder.mergeFrom(getLoad());
|
subBuilder.mergeFrom(getLoad());
|
||||||
}
|
}
|
||||||
|
@ -1665,20 +1665,20 @@ public final class RegionServerStatusProtos {
|
||||||
}
|
}
|
||||||
|
|
||||||
// optional .ServerLoad load = 2;
|
// optional .ServerLoad load = 2;
|
||||||
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
|
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
|
||||||
private com.google.protobuf.SingleFieldBuilder<
|
private com.google.protobuf.SingleFieldBuilder<
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder> loadBuilder_;
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder> loadBuilder_;
|
||||||
public boolean hasLoad() {
|
public boolean hasLoad() {
|
||||||
return ((bitField0_ & 0x00000002) == 0x00000002);
|
return ((bitField0_ & 0x00000002) == 0x00000002);
|
||||||
}
|
}
|
||||||
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad() {
|
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad() {
|
||||||
if (loadBuilder_ == null) {
|
if (loadBuilder_ == null) {
|
||||||
return load_;
|
return load_;
|
||||||
} else {
|
} else {
|
||||||
return loadBuilder_.getMessage();
|
return loadBuilder_.getMessage();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
public Builder setLoad(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad value) {
|
public Builder setLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
|
||||||
if (loadBuilder_ == null) {
|
if (loadBuilder_ == null) {
|
||||||
if (value == null) {
|
if (value == null) {
|
||||||
throw new NullPointerException();
|
throw new NullPointerException();
|
||||||
|
@ -1692,7 +1692,7 @@ public final class RegionServerStatusProtos {
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
public Builder setLoad(
|
public Builder setLoad(
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder builderForValue) {
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder builderForValue) {
|
||||||
if (loadBuilder_ == null) {
|
if (loadBuilder_ == null) {
|
||||||
load_ = builderForValue.build();
|
load_ = builderForValue.build();
|
||||||
onChanged();
|
onChanged();
|
||||||
|
@ -1702,12 +1702,12 @@ public final class RegionServerStatusProtos {
|
||||||
bitField0_ |= 0x00000002;
|
bitField0_ |= 0x00000002;
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
public Builder mergeLoad(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad value) {
|
public Builder mergeLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
|
||||||
if (loadBuilder_ == null) {
|
if (loadBuilder_ == null) {
|
||||||
if (((bitField0_ & 0x00000002) == 0x00000002) &&
|
if (((bitField0_ & 0x00000002) == 0x00000002) &&
|
||||||
load_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance()) {
|
load_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
|
||||||
load_ =
|
load_ =
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
|
||||||
} else {
|
} else {
|
||||||
load_ = value;
|
load_ = value;
|
||||||
}
|
}
|
||||||
|
@ -1720,7 +1720,7 @@ public final class RegionServerStatusProtos {
|
||||||
}
|
}
|
||||||
public Builder clearLoad() {
|
public Builder clearLoad() {
|
||||||
if (loadBuilder_ == null) {
|
if (loadBuilder_ == null) {
|
||||||
load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
|
load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
|
||||||
onChanged();
|
onChanged();
|
||||||
} else {
|
} else {
|
||||||
loadBuilder_.clear();
|
loadBuilder_.clear();
|
||||||
|
@ -1728,12 +1728,12 @@ public final class RegionServerStatusProtos {
|
||||||
bitField0_ = (bitField0_ & ~0x00000002);
|
bitField0_ = (bitField0_ & ~0x00000002);
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder getLoadBuilder() {
|
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder getLoadBuilder() {
|
||||||
bitField0_ |= 0x00000002;
|
bitField0_ |= 0x00000002;
|
||||||
onChanged();
|
onChanged();
|
||||||
return getLoadFieldBuilder().getBuilder();
|
return getLoadFieldBuilder().getBuilder();
|
||||||
}
|
}
|
||||||
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder() {
|
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder() {
|
||||||
if (loadBuilder_ != null) {
|
if (loadBuilder_ != null) {
|
||||||
return loadBuilder_.getMessageOrBuilder();
|
return loadBuilder_.getMessageOrBuilder();
|
||||||
} else {
|
} else {
|
||||||
|
@ -1741,11 +1741,11 @@ public final class RegionServerStatusProtos {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
private com.google.protobuf.SingleFieldBuilder<
|
private com.google.protobuf.SingleFieldBuilder<
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder>
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>
|
||||||
getLoadFieldBuilder() {
|
getLoadFieldBuilder() {
|
||||||
if (loadBuilder_ == null) {
|
if (loadBuilder_ == null) {
|
||||||
loadBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
loadBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder>(
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>(
|
||||||
load_,
|
load_,
|
||||||
getParentForChildren(),
|
getParentForChildren(),
|
||||||
isClean());
|
isClean());
|
||||||
|
@ -4175,31 +4175,32 @@ public final class RegionServerStatusProtos {
|
||||||
descriptor;
|
descriptor;
|
||||||
static {
|
static {
|
||||||
java.lang.String[] descriptorData = {
|
java.lang.String[] descriptorData = {
|
||||||
"\n\030RegionServerStatus.proto\032\013hbase.proto\"" +
|
"\n\030RegionServerStatus.proto\032\013hbase.proto\032" +
|
||||||
"^\n\032RegionServerStartupRequest\022\014\n\004port\030\001 " +
|
"\023ClusterStatus.proto\"^\n\032RegionServerStar" +
|
||||||
"\002(\r\022\027\n\017serverStartCode\030\002 \002(\004\022\031\n\021serverCu" +
|
"tupRequest\022\014\n\004port\030\001 \002(\r\022\027\n\017serverStartC" +
|
||||||
"rrentTime\030\003 \002(\004\"B\n\033RegionServerStartupRe" +
|
"ode\030\002 \002(\004\022\031\n\021serverCurrentTime\030\003 \002(\004\"B\n\033" +
|
||||||
"sponse\022#\n\nmapEntries\030\001 \003(\0132\017.NameStringP" +
|
"RegionServerStartupResponse\022#\n\nmapEntrie" +
|
||||||
"air\"S\n\031RegionServerReportRequest\022\033\n\006serv" +
|
"s\030\001 \003(\0132\017.NameStringPair\"S\n\031RegionServer" +
|
||||||
"er\030\001 \002(\0132\013.ServerName\022\031\n\004load\030\002 \001(\0132\013.Se" +
|
"ReportRequest\022\033\n\006server\030\001 \002(\0132\013.ServerNa" +
|
||||||
"rverLoad\"\034\n\032RegionServerReportResponse\"N" +
|
"me\022\031\n\004load\030\002 \001(\0132\013.ServerLoad\"\034\n\032RegionS" +
|
||||||
"\n\031ReportRSFatalErrorRequest\022\033\n\006server\030\001 " +
|
"erverReportResponse\"N\n\031ReportRSFatalErro" +
|
||||||
"\002(\0132\013.ServerName\022\024\n\014errorMessage\030\002 \002(\t\"\034",
|
"rRequest\022\033\n\006server\030\001 \002(\0132\013.ServerName\022\024\n",
|
||||||
"\n\032ReportRSFatalErrorResponse\"5\n\037GetLastF" +
|
"\014errorMessage\030\002 \002(\t\"\034\n\032ReportRSFatalErro" +
|
||||||
"lushedSequenceIdRequest\022\022\n\nregionName\030\001 " +
|
"rResponse\"5\n\037GetLastFlushedSequenceIdReq" +
|
||||||
"\002(\014\"A\n GetLastFlushedSequenceIdResponse\022" +
|
"uest\022\022\n\nregionName\030\001 \002(\014\"A\n GetLastFlush" +
|
||||||
"\035\n\025lastFlushedSequenceId\030\001 \002(\0042\354\002\n\031Regio" +
|
"edSequenceIdResponse\022\035\n\025lastFlushedSeque" +
|
||||||
"nServerStatusService\022P\n\023regionServerStar" +
|
"nceId\030\001 \002(\0042\354\002\n\031RegionServerStatusServic" +
|
||||||
"tup\022\033.RegionServerStartupRequest\032\034.Regio" +
|
"e\022P\n\023regionServerStartup\022\033.RegionServerS" +
|
||||||
"nServerStartupResponse\022M\n\022regionServerRe" +
|
"tartupRequest\032\034.RegionServerStartupRespo" +
|
||||||
"port\022\032.RegionServerReportRequest\032\033.Regio" +
|
"nse\022M\n\022regionServerReport\022\032.RegionServer" +
|
||||||
"nServerReportResponse\022M\n\022reportRSFatalEr" +
|
"ReportRequest\032\033.RegionServerReportRespon" +
|
||||||
"ror\022\032.ReportRSFatalErrorRequest\032\033.Report",
|
"se\022M\n\022reportRSFatalError\022\032.ReportRSFatal",
|
||||||
"RSFatalErrorResponse\022_\n\030getLastFlushedSe" +
|
"ErrorRequest\032\033.ReportRSFatalErrorRespons" +
|
||||||
"quenceId\022 .GetLastFlushedSequenceIdReque" +
|
"e\022_\n\030getLastFlushedSequenceId\022 .GetLastF" +
|
||||||
"st\032!.GetLastFlushedSequenceIdResponseBN\n" +
|
"lushedSequenceIdRequest\032!.GetLastFlushed" +
|
||||||
"*org.apache.hadoop.hbase.protobuf.genera" +
|
"SequenceIdResponseBN\n*org.apache.hadoop." +
|
||||||
"tedB\030RegionServerStatusProtosH\001\210\001\001\240\001\001"
|
"hbase.protobuf.generatedB\030RegionServerSt" +
|
||||||
|
"atusProtosH\001\210\001\001\240\001\001"
|
||||||
};
|
};
|
||||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||||
|
@ -4277,6 +4278,7 @@ public final class RegionServerStatusProtos {
|
||||||
.internalBuildGeneratedFileFrom(descriptorData,
|
.internalBuildGeneratedFileFrom(descriptorData,
|
||||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
|
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
|
||||||
|
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(),
|
||||||
}, assigner);
|
}, assigner);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
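With RegionServerStatusProtos rewired the same way, a region server report now carries a ClusterStatusProtos.ServerLoad instead of the old HBaseProtos one. A minimal sketch of building a report; ServerName's hostName/port/startCode fields are assumed from hbase.proto, which this diff only references, and the host name is made up:

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;

// The load field's setter takes the relocated ServerLoad type, as the hunks above show.
RegionServerStatusProtos.RegionServerReportRequest report =
    RegionServerStatusProtos.RegionServerReportRequest.newBuilder()
        .setServer(HBaseProtos.ServerName.newBuilder()
            .setHostName("rs1.example.com") // hypothetical host
            .setPort(60020)
            .setStartCode(1L))
        .setLoad(ClusterStatusProtos.ServerLoad.newBuilder()
            .setNumberOfRequests(1200)
            .setUsedHeapMB(512)
            .setMaxHeapMB(4096))
        .build();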
|
@ -8,7 +8,7 @@ public final class ZooKeeperProtos {
|
||||||
public static void registerAllExtensions(
|
public static void registerAllExtensions(
|
||||||
com.google.protobuf.ExtensionRegistry registry) {
|
com.google.protobuf.ExtensionRegistry registry) {
|
||||||
}
|
}
|
||||||
public interface RootRegionServerOrBuilder
|
public interface MetaRegionServerOrBuilder
|
||||||
extends com.google.protobuf.MessageOrBuilder {
|
extends com.google.protobuf.MessageOrBuilder {
|
||||||
|
|
||||||
// required .ServerName server = 1;
|
// required .ServerName server = 1;
|
||||||
|
@ -16,32 +16,32 @@ public final class ZooKeeperProtos {
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
|
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
|
||||||
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
|
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
|
||||||
}
|
}
|
||||||
public static final class RootRegionServer extends
|
public static final class MetaRegionServer extends
|
||||||
com.google.protobuf.GeneratedMessage
|
com.google.protobuf.GeneratedMessage
|
||||||
implements RootRegionServerOrBuilder {
|
implements MetaRegionServerOrBuilder {
|
||||||
// Use RootRegionServer.newBuilder() to construct.
|
// Use MetaRegionServer.newBuilder() to construct.
|
||||||
private RootRegionServer(Builder builder) {
|
private MetaRegionServer(Builder builder) {
|
||||||
super(builder);
|
super(builder);
|
||||||
}
|
}
|
||||||
private RootRegionServer(boolean noInit) {}
|
private MetaRegionServer(boolean noInit) {}
|
||||||
|
|
||||||
private static final RootRegionServer defaultInstance;
|
private static final MetaRegionServer defaultInstance;
|
||||||
public static RootRegionServer getDefaultInstance() {
|
public static MetaRegionServer getDefaultInstance() {
|
||||||
return defaultInstance;
|
return defaultInstance;
|
||||||
}
|
}
|
||||||
|
|
||||||
public RootRegionServer getDefaultInstanceForType() {
|
public MetaRegionServer getDefaultInstanceForType() {
|
||||||
return defaultInstance;
|
return defaultInstance;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static final com.google.protobuf.Descriptors.Descriptor
|
public static final com.google.protobuf.Descriptors.Descriptor
|
||||||
getDescriptor() {
|
getDescriptor() {
|
||||||
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor;
|
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||||
internalGetFieldAccessorTable() {
|
internalGetFieldAccessorTable() {
|
||||||
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable;
|
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable;
|
||||||
}
|
}
|
||||||
|
|
||||||
private int bitField0_;
|
private int bitField0_;
|
||||||
|
@ -114,10 +114,10 @@ public final class ZooKeeperProtos {
|
||||||
if (obj == this) {
|
if (obj == this) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)) {
|
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)) {
|
||||||
return super.equals(obj);
|
return super.equals(obj);
|
||||||
}
|
}
|
||||||
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) obj;
|
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) obj;
|
||||||
|
|
||||||
boolean result = true;
|
boolean result = true;
|
||||||
result = result && (hasServer() == other.hasServer());
|
result = result && (hasServer() == other.hasServer());
|
||||||
|
@ -142,41 +142,41 @@ public final class ZooKeeperProtos {
|
||||||
return hash;
|
return hash;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
|
||||||
com.google.protobuf.ByteString data)
|
com.google.protobuf.ByteString data)
|
||||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||||
return newBuilder().mergeFrom(data).buildParsed();
|
return newBuilder().mergeFrom(data).buildParsed();
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
|
||||||
com.google.protobuf.ByteString data,
|
com.google.protobuf.ByteString data,
|
||||||
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||||
.buildParsed();
|
.buildParsed();
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(byte[] data)
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(byte[] data)
|
||||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||||
return newBuilder().mergeFrom(data).buildParsed();
|
return newBuilder().mergeFrom(data).buildParsed();
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
|
||||||
byte[] data,
|
byte[] data,
|
||||||
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||||
.buildParsed();
|
.buildParsed();
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(java.io.InputStream input)
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(java.io.InputStream input)
|
||||||
throws java.io.IOException {
|
throws java.io.IOException {
|
||||||
return newBuilder().mergeFrom(input).buildParsed();
|
return newBuilder().mergeFrom(input).buildParsed();
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
|
||||||
java.io.InputStream input,
|
java.io.InputStream input,
|
||||||
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
throws java.io.IOException {
|
throws java.io.IOException {
|
||||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||||
.buildParsed();
|
.buildParsed();
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(java.io.InputStream input)
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(java.io.InputStream input)
|
||||||
throws java.io.IOException {
|
throws java.io.IOException {
|
||||||
Builder builder = newBuilder();
|
Builder builder = newBuilder();
|
||||||
if (builder.mergeDelimitedFrom(input)) {
|
if (builder.mergeDelimitedFrom(input)) {
|
||||||
|
@ -185,7 +185,7 @@ public final class ZooKeeperProtos {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(
|
||||||
java.io.InputStream input,
|
java.io.InputStream input,
|
||||||
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
throws java.io.IOException {
|
throws java.io.IOException {
|
||||||
|
@ -196,12 +196,12 @@ public final class ZooKeeperProtos {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
|
||||||
com.google.protobuf.CodedInputStream input)
|
com.google.protobuf.CodedInputStream input)
|
||||||
throws java.io.IOException {
|
throws java.io.IOException {
|
||||||
return newBuilder().mergeFrom(input).buildParsed();
|
return newBuilder().mergeFrom(input).buildParsed();
|
||||||
}
|
}
|
||||||
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
|
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
|
||||||
com.google.protobuf.CodedInputStream input,
|
com.google.protobuf.CodedInputStream input,
|
||||||
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
||||||
throws java.io.IOException {
|
throws java.io.IOException {
|
||||||
|
@ -211,7 +211,7 @@ public final class ZooKeeperProtos {
|
||||||
|
|
||||||
public static Builder newBuilder() { return Builder.create(); }
|
public static Builder newBuilder() { return Builder.create(); }
|
||||||
public Builder newBuilderForType() { return newBuilder(); }
|
public Builder newBuilderForType() { return newBuilder(); }
|
||||||
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer prototype) {
|
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer prototype) {
|
||||||
return newBuilder().mergeFrom(prototype);
|
return newBuilder().mergeFrom(prototype);
|
||||||
}
|
}
|
||||||
public Builder toBuilder() { return newBuilder(this); }
|
public Builder toBuilder() { return newBuilder(this); }
|
||||||
|
@ -224,18 +224,18 @@ public final class ZooKeeperProtos {
|
||||||
}
|
}
|
||||||
public static final class Builder extends
|
public static final class Builder extends
|
||||||
com.google.protobuf.GeneratedMessage.Builder<Builder>
|
com.google.protobuf.GeneratedMessage.Builder<Builder>
|
||||||
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServerOrBuilder {
|
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServerOrBuilder {
|
||||||
public static final com.google.protobuf.Descriptors.Descriptor
|
public static final com.google.protobuf.Descriptors.Descriptor
|
||||||
getDescriptor() {
|
getDescriptor() {
|
||||||
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor;
|
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||||
internalGetFieldAccessorTable() {
|
internalGetFieldAccessorTable() {
|
||||||
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable;
|
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.newBuilder()
|
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.newBuilder()
|
||||||
private Builder() {
|
private Builder() {
|
||||||
maybeForceBuilderInitialization();
|
maybeForceBuilderInitialization();
|
||||||
}
|
}
|
||||||
|
@ -270,24 +270,24 @@ public final class ZooKeeperProtos {
|
||||||
|
|
||||||
public com.google.protobuf.Descriptors.Descriptor
|
public com.google.protobuf.Descriptors.Descriptor
|
||||||
getDescriptorForType() {
|
getDescriptorForType() {
|
||||||
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDescriptor();
|
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDescriptor();
|
||||||
}
|
}
|
||||||
|
|
||||||
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer getDefaultInstanceForType() {
|
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer getDefaultInstanceForType() {
|
||||||
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance();
|
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance();
|
||||||
}
|
}
|
||||||
|
|
||||||
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer build() {
|
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer build() {
|
||||||
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial();
|
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
|
||||||
if (!result.isInitialized()) {
|
if (!result.isInitialized()) {
|
||||||
throw newUninitializedMessageException(result);
|
throw newUninitializedMessageException(result);
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildParsed()
|
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildParsed()
|
||||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||||
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial();
|
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
|
||||||
if (!result.isInitialized()) {
|
if (!result.isInitialized()) {
|
||||||
throw newUninitializedMessageException(
|
throw newUninitializedMessageException(
|
||||||
result).asInvalidProtocolBufferException();
|
result).asInvalidProtocolBufferException();
|
||||||
|
@ -295,8 +295,8 @@ public final class ZooKeeperProtos {
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildPartial() {
|
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildPartial() {
|
||||||
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer(this);
|
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer(this);
|
||||||
int from_bitField0_ = bitField0_;
|
int from_bitField0_ = bitField0_;
|
||||||
int to_bitField0_ = 0;
|
int to_bitField0_ = 0;
|
||||||
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
||||||
|
@ -313,16 +313,16 @@ public final class ZooKeeperProtos {
|
||||||
}
|
}
|
||||||
|
|
||||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||||
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) {
|
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) {
|
||||||
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)other);
|
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)other);
|
||||||
} else {
|
} else {
|
||||||
super.mergeFrom(other);
|
super.mergeFrom(other);
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other) {
|
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other) {
|
||||||
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance()) return this;
|
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance()) return this;
|
||||||
if (other.hasServer()) {
|
if (other.hasServer()) {
|
||||||
mergeServer(other.getServer());
|
mergeServer(other.getServer());
|
||||||
}
|
}
|
||||||
|
@ -470,15 +470,15 @@ public final class ZooKeeperProtos {
|
||||||
return serverBuilder_;
|
return serverBuilder_;
|
||||||
}
|
}
|
||||||
|
|
||||||
// @@protoc_insertion_point(builder_scope:RootRegionServer)
|
// @@protoc_insertion_point(builder_scope:MetaRegionServer)
|
||||||
}
|
}
|
||||||
|
|
||||||
static {
|
static {
|
||||||
defaultInstance = new RootRegionServer(true);
|
defaultInstance = new MetaRegionServer(true);
|
||||||
defaultInstance.initFields();
|
defaultInstance.initFields();
|
||||||
}
|
}
|
||||||
|
|
||||||
// @@protoc_insertion_point(class_scope:RootRegionServer)
|
// @@protoc_insertion_point(class_scope:MetaRegionServer)
|
||||||
}
|
}
|
||||||
|
|
||||||
public interface MasterOrBuilder
|
public interface MasterOrBuilder
|
||||||
|
@ -6868,10 +6868,10 @@ public final class ZooKeeperProtos {
|
||||||
}
|
}
|
||||||
|
|
||||||
private static com.google.protobuf.Descriptors.Descriptor
|
private static com.google.protobuf.Descriptors.Descriptor
|
||||||
internal_static_RootRegionServer_descriptor;
|
internal_static_MetaRegionServer_descriptor;
|
||||||
private static
|
private static
|
||||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||||
internal_static_RootRegionServer_fieldAccessorTable;
|
internal_static_MetaRegionServer_fieldAccessorTable;
|
||||||
private static com.google.protobuf.Descriptors.Descriptor
|
private static com.google.protobuf.Descriptors.Descriptor
|
||||||
internal_static_Master_descriptor;
|
internal_static_Master_descriptor;
|
||||||
private static
|
private static
|
||||||
|
@ -6941,7 +6941,7 @@ public final class ZooKeeperProtos {
|
||||||
descriptor;
|
descriptor;
|
||||||
static {
|
static {
|
||||||
java.lang.String[] descriptorData = {
|
java.lang.String[] descriptorData = {
|
||||||
"\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020RootRe" +
|
"\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020MetaRe" +
|
||||||
"gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" +
|
"gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" +
|
||||||
"%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" +
|
"%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" +
|
||||||
"\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\203\001\n\020Regio" +
|
"\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\203\001\n\020Regio" +
|
||||||
|
@ -6976,14 +6976,14 @@ public final class ZooKeeperProtos {
|
||||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||||
descriptor = root;
|
descriptor = root;
|
||||||
internal_static_RootRegionServer_descriptor =
|
internal_static_MetaRegionServer_descriptor =
|
||||||
getDescriptor().getMessageTypes().get(0);
|
getDescriptor().getMessageTypes().get(0);
|
||||||
internal_static_RootRegionServer_fieldAccessorTable = new
|
internal_static_MetaRegionServer_fieldAccessorTable = new
|
||||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||||
internal_static_RootRegionServer_descriptor,
|
internal_static_MetaRegionServer_descriptor,
|
||||||
new java.lang.String[] { "Server", },
|
new java.lang.String[] { "Server", },
|
||||||
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.class,
|
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class,
|
||||||
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.Builder.class);
|
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
|
||||||
internal_static_Master_descriptor =
|
internal_static_Master_descriptor =
|
||||||
getDescriptor().getMessageTypes().get(1);
|
getDescriptor().getMessageTypes().get(1);
|
||||||
internal_static_Master_fieldAccessorTable = new
|
internal_static_Master_fieldAccessorTable = new
|
||||||
|
|
|
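After the RootRegionServer to MetaRegionServer rename above, the bytes written to the meta-region znode would be produced by the renamed message. A hedged sketch (ServerName's fields again assumed from hbase.proto, host made up):

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

// Serialize a MetaRegionServer; a reader would use MetaRegionServer.parseFrom(data).
ZooKeeperProtos.MetaRegionServer mrs = ZooKeeperProtos.MetaRegionServer.newBuilder()
    .setServer(HBaseProtos.ServerName.newBuilder()
        .setHostName("rs1.example.com") // hypothetical host
        .setPort(60020)
        .setStartCode(1L))
    .build();
byte[] znodeData = mrs.toByteArray();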
@ -0,0 +1,64 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Cell and KeyValue protos
|
||||||
|
|
||||||
|
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
|
||||||
|
option java_outer_classname = "CellProtos";
|
||||||
|
option java_generate_equals_and_hash = true;
|
||||||
|
option optimize_for = SPEED;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The type of the key in a Cell
|
||||||
|
*/
|
||||||
|
enum CellType {
|
||||||
|
MINIMUM = 0;
|
||||||
|
PUT = 4;
|
||||||
|
|
||||||
|
DELETE = 8;
|
||||||
|
DELETE_COLUMN = 12;
|
||||||
|
DELETE_FAMILY = 14;
|
||||||
|
|
||||||
|
// MAXIMUM is used when searching; you look from maximum on down.
|
||||||
|
MAXIMUM = 255;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protocol buffer version of Cell.
|
||||||
|
*/
|
||||||
|
message Cell {
|
||||||
|
optional bytes row = 1;
|
||||||
|
optional bytes family = 2;
|
||||||
|
optional bytes qualifier = 3;
|
||||||
|
optional uint64 timestamp = 4;
|
||||||
|
optional CellType cellType = 5;
|
||||||
|
optional bytes value = 6;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protocol buffer version of KeyValue.
|
||||||
|
* It doesn't have those transient parameters
|
||||||
|
*/
|
||||||
|
message KeyValue {
|
||||||
|
required bytes row = 1;
|
||||||
|
required bytes family = 2;
|
||||||
|
required bytes qualifier = 3;
|
||||||
|
optional uint64 timestamp = 4;
|
||||||
|
optional CellType keyType = 5;
|
||||||
|
optional bytes value = 6;
|
||||||
|
}
|
|
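The new Cell.proto above generates a CellProtos outer class. A fully populated Cell would be built as below (a sketch; the row/family/qualifier/value bytes are made up, and the setter names follow protobuf's standard mapping of the fields above):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;

// Every field of Cell is optional, so any subset may be set.
CellProtos.Cell cell = CellProtos.Cell.newBuilder()
    .setRow(ByteString.copyFromUtf8("row1"))
    .setFamily(ByteString.copyFromUtf8("cf"))
    .setQualifier(ByteString.copyFromUtf8("q1"))
    .setTimestamp(System.currentTimeMillis())
    .setCellType(CellProtos.CellType.PUT)
    .setValue(ByteString.copyFromUtf8("v1"))
    .build();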
@ -25,6 +25,8 @@ option java_generate_equals_and_hash = true;
|
||||||
option optimize_for = SPEED;
|
option optimize_for = SPEED;
|
||||||
|
|
||||||
import "hbase.proto";
|
import "hbase.proto";
|
||||||
|
import "Filter.proto";
|
||||||
|
import "Cell.proto";
|
||||||
import "Comparator.proto";
|
import "Comparator.proto";
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
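With Filter.proto and Cell.proto now imported by Client.proto, Scan's filter field (field 5 in the descriptor data earlier) resolves to the relocated type as well. Another hedged sketch, with an illustrative filter class name and the same assumed Filter fields as before:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;

// A bounded scan carrying a FilterProtos.Filter.
ClientProtos.Scan scan = ClientProtos.Scan.newBuilder()
    .setStartRow(ByteString.copyFromUtf8("a"))
    .setStopRow(ByteString.copyFromUtf8("z"))
    .setFilter(FilterProtos.Filter.newBuilder()
        .setName("org.apache.hadoop.hbase.filter.KeyOnlyFilter")) // assumed filter class name
    .build();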
@@ -51,11 +51,110 @@ message RegionInTransition {
   required RegionState regionState = 2;
 }
 
+message RegionLoad {
+  /** the region specifier */
+  required RegionSpecifier regionSpecifier = 1;
+
+  /** the number of stores for the region */
+  optional uint32 stores = 2;
+
+  /** the number of storefiles for the region */
+  optional uint32 storefiles = 3;
+
+  /** the total size of the store files for the region, uncompressed, in MB */
+  optional uint32 storeUncompressedSizeMB = 4;
+
+  /** the current total size of the store files for the region, in MB */
+  optional uint32 storefileSizeMB = 5;
+
+  /** the current size of the memstore for the region, in MB */
+  optional uint32 memstoreSizeMB = 6;
+
+  /**
+   * The current total size of root-level store file indexes for the region,
+   * in MB. The same as {@link #rootIndexSizeKB} but in MB.
+   */
+  optional uint32 storefileIndexSizeMB = 7;
+
+  /** the current total read requests made to region */
+  optional uint64 readRequestsCount = 8;
+
+  /** the current total write requests made to region */
+  optional uint64 writeRequestsCount = 9;
+
+  /** the total compacting key values in currently running compaction */
+  optional uint64 totalCompactingKVs = 10;
+
+  /** the completed count of key values in currently running compaction */
+  optional uint64 currentCompactedKVs = 11;
+
+  /** The current total size of root-level indexes for the region, in KB. */
+  optional uint32 rootIndexSizeKB = 12;
+
+  /** The total size of all index blocks, not just the root level, in KB. */
+  optional uint32 totalStaticIndexSizeKB = 13;
+
+  /**
+   * The total size of all Bloom filter blocks, not just loaded into the
+   * block cache, in KB.
+   */
+  optional uint32 totalStaticBloomSizeKB = 14;
+
+  /** the most recent sequence Id from cache flush */
+  optional uint64 completeSequenceId = 15;
+}
+
+/* Server-level protobufs */
+
+message ServerLoad {
+  /** Number of requests since last report. */
+  optional uint32 numberOfRequests = 1;
+
+  /** Total Number of requests from the start of the region server. */
+  optional uint32 totalNumberOfRequests = 2;
+
+  /** the amount of used heap, in MB. */
+  optional uint32 usedHeapMB = 3;
+
+  /** the maximum allowable size of the heap, in MB. */
+  optional uint32 maxHeapMB = 4;
+
+  /** Information on the load of individual regions. */
+  repeated RegionLoad regionLoads = 5;
+
+  /**
+   * Regionserver-level coprocessors, e.g., WALObserver implementations.
+   * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+   * objects.
+   */
+  repeated Coprocessor coprocessors = 6;
+
+  /**
+   * Time when incremental (non-total) counts began being calculated (e.g. numberOfRequests).
+   * Time is measured as the difference, measured in milliseconds, between the current time
+   * and midnight, January 1, 1970 UTC.
+   */
+  optional uint64 reportStartTime = 7;
+
+  /**
+   * Time when report was generated.
+   * Time is measured as the difference, measured in milliseconds, between the current time
+   * and midnight, January 1, 1970 UTC.
+   */
+  optional uint64 reportEndTime = 8;
+
+  /**
+   * The port number that this region server is hosting an info server on.
+   */
+  optional uint32 infoServerPort = 9;
+}
+
 message LiveServerInfo {
   required ServerName server = 1;
   required ServerLoad serverLoad = 2;
 }
 
 
 message ClusterStatus {
   optional HBaseVersionFileContent hbaseVersion = 1;
   repeated LiveServerInfo liveServers = 2;
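For orientation, a minimal sketch (not part of this commit) of populating the relocated messages from Java, patterned on the TestServerLoad changes further down; all counts and sizes are illustrative:

// Illustrative only: RegionSpecifier stays in hbase.proto/HBaseProtos,
// while RegionLoad and ServerLoad now come from ClusterStatusProtos.
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class ServerLoadExample {
  public static ClusterStatusProtos.ServerLoad build() {
    HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFromUtf8("ASDFGHJKL"))  // made-up encoded name
        .build();
    ClusterStatusProtos.RegionLoad rl = ClusterStatusProtos.RegionLoad.newBuilder()
        .setRegionSpecifier(spec)  // the only required field
        .setStores(10)
        .setStorefiles(101)
        .setStorefileSizeMB(520)
        .build();
    return ClusterStatusProtos.ServerLoad.newBuilder()
        .setNumberOfRequests(100)
        .setTotalNumberOfRequests(10000)
        .setUsedHeapMB(512)
        .setMaxHeapMB(1024)
        .addRegionLoads(rl)
        .build();
  }
}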
@@ -27,6 +27,11 @@ option optimize_for = SPEED;
 import "hbase.proto";
 import "Comparator.proto";
 
+message Filter {
+  required string name = 1;
+  optional bytes serializedFilter = 2;
+}
+
 message ColumnCountGetFilter {
   required int32 limit = 1;
 }
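A short sketch (not part of this commit) of the generic filter envelope that now compiles into FilterProtos, as exercised by the TestColumnPaginationFilter change further down; the serialized bytes are illustrative:

// Illustrative only: the envelope carries a filter class name plus its
// serialized form. The byte payload here is a made-up stand-in.
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;

public class FilterEnvelopeExample {
  public static void main(String[] args) {
    FilterProtos.Filter proto = FilterProtos.Filter.newBuilder()
        .setName("org.apache.hadoop.hbase.filter.ColumnCountGetFilter")
        .setSerializedFilter(ByteString.copyFrom(new byte[] {0x08, 0x01}))
        .build();
    System.out.println(proto.getName());
  }
}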
@@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
 import "hbase.proto";
+import "ClusterStatus.proto";
 
 message RegionServerStartupRequest {
   /** Port number this regionserver is up on */
@@ -28,10 +28,10 @@ option optimize_for = SPEED;
 import "hbase.proto";
 
 /**
- * Content of the root-region-server znode.
+ * Content of the meta-region-server znode.
  */
-message RootRegionServer {
-  // The ServerName hosting the root region currently.
+message MetaRegionServer {
+  // The ServerName hosting the meta region currently.
   required ServerName server = 1;
 }
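A hedged sketch (not part of this commit) of writing the renamed znode content; it assumes this file keeps its existing java_outer_classname, ZooKeeperProtos, and the server name values are made up:

// Illustrative only: assumes ZooKeeper.proto still compiles to
// ZooKeeperProtos; hostname, port, and start code are made up.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

public class MetaZNodeExample {
  public static byte[] metaZNodeContent() {
    ZooKeeperProtos.MetaRegionServer mrs = ZooKeeperProtos.MetaRegionServer.newBuilder()
        .setServer(HBaseProtos.ServerName.newBuilder()
            .setHostName("rs1.example.com")
            .setPort(60020)
            .setStartCode(1L)
            .build())
        .build();
    return mrs.toByteArray();  // bytes written to the meta-region-server znode
  }
}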
@@ -23,32 +23,7 @@ option java_outer_classname = "HBaseProtos";
 option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
-/**
- * The type of the key in a Cell
- */
-enum CellType {
-  MINIMUM = 0;
-  PUT = 4;
-
-  DELETE = 8;
-  DELETE_COLUMN = 12;
-  DELETE_FAMILY = 14;
-
-  // MAXIMUM is used when searching; you look from maximum on down.
-  MAXIMUM = 255;
-}
-
-/**
- * Protocol buffer version of Cell.
- */
-message Cell {
-  optional bytes row = 1;
-  optional bytes family = 2;
-  optional bytes qualifier = 3;
-  optional uint64 timestamp = 4;
-  optional CellType cellType = 5;
-  optional bytes value = 6;
-}
+import "Cell.proto";
 
 /**
  * Table Schema
@@ -110,104 +85,6 @@ message RegionSpecifier {
   }
 }
 
-message RegionLoad {
-  /** the region specifier */
-  required RegionSpecifier regionSpecifier = 1;
-
-  /** the number of stores for the region */
-  optional uint32 stores = 2;
-
-  /** the number of storefiles for the region */
-  optional uint32 storefiles = 3;
-
-  /** the total size of the store files for the region, uncompressed, in MB */
-  optional uint32 storeUncompressedSizeMB = 4;
-
-  /** the current total size of the store files for the region, in MB */
-  optional uint32 storefileSizeMB = 5;
-
-  /** the current size of the memstore for the region, in MB */
-  optional uint32 memstoreSizeMB = 6;
-
-  /**
-   * The current total size of root-level store file indexes for the region,
-   * in MB. The same as {@link #rootIndexSizeKB} but in MB.
-   */
-  optional uint32 storefileIndexSizeMB = 7;
-
-  /** the current total read requests made to region */
-  optional uint64 readRequestsCount = 8;
-
-  /** the current total write requests made to region */
-  optional uint64 writeRequestsCount = 9;
-
-  /** the total compacting key values in currently running compaction */
-  optional uint64 totalCompactingKVs = 10;
-
-  /** the completed count of key values in currently running compaction */
-  optional uint64 currentCompactedKVs = 11;
-
-  /** The current total size of root-level indexes for the region, in KB. */
-  optional uint32 rootIndexSizeKB = 12;
-
-  /** The total size of all index blocks, not just the root level, in KB. */
-  optional uint32 totalStaticIndexSizeKB = 13;
-
-  /**
-   * The total size of all Bloom filter blocks, not just loaded into the
-   * block cache, in KB.
-   */
-  optional uint32 totalStaticBloomSizeKB = 14;
-
-  /** the most recent sequence Id from cache flush */
-  optional uint64 completeSequenceId = 15;
-}
-
-/* Server-level protobufs */
-
-message ServerLoad {
-  /** Number of requests since last report. */
-  optional uint32 numberOfRequests = 1;
-
-  /** Total Number of requests from the start of the region server. */
-  optional uint32 totalNumberOfRequests = 2;
-
-  /** the amount of used heap, in MB. */
-  optional uint32 usedHeapMB = 3;
-
-  /** the maximum allowable size of the heap, in MB. */
-  optional uint32 maxHeapMB = 4;
-
-  /** Information on the load of individual regions. */
-  repeated RegionLoad regionLoads = 5;
-
-  /**
-   * Regionserver-level coprocessors, e.g., WALObserver implementations.
-   * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-   * objects.
-   */
-  repeated Coprocessor coprocessors = 6;
-
-  /**
-   * Time when incremental (non-total) counts began being calculated (e.g. numberOfRequests).
-   * Time is measured as the difference, measured in milliseconds, between the current time
-   * and midnight, January 1, 1970 UTC.
-   */
-  optional uint64 reportStartTime = 7;
-
-  /**
-   * Time when report was generated.
-   * Time is measured as the difference, measured in milliseconds, between the current time
-   * and midnight, January 1, 1970 UTC.
-   */
-  optional uint64 reportEndTime = 8;
-
-  /**
-   * The port number that this region server is hosting an info server on.
-   */
-  optional uint32 infoServerPort = 9;
-}
-
 /**
  * A range of time. Both from and to are Java time
  * stamp in milliseconds. If you don't specify a time
@@ -219,11 +96,6 @@ message TimeRange {
   optional uint64 to = 2;
 }
 
-message Filter {
-  required string name = 1;
-  optional bytes serializedFilter = 2;
-}
-
 /* Comparison operators */
 enum CompareType {
   LESS = 0;
@@ -235,19 +107,6 @@ enum CompareType {
   NO_OP = 6;
 }
 
-/**
- * Protocol buffer version of KeyValue.
- * It doesn't have those transient parameters
- */
-message KeyValue {
-  required bytes row = 1;
-  required bytes family = 2;
-  required bytes qualifier = 3;
-  optional uint64 timestamp = 4;
-  optional CellType keyType = 5;
-  optional bytes value = 6;
-}
-
 /**
  * Protocol buffer version of ServerName
 */
@@ -30,7 +30,7 @@ org.apache.hadoop.hbase.ServerName;
 org.apache.hadoop.hbase.HBaseConfiguration;
 org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 </%import>
 <%if format.equals("json") %>
   <& ../common/TaskMonitorTmpl; filter = filter; format = "json" &>
@@ -29,7 +29,7 @@
 org.apache.hadoop.hbase.HBaseConfiguration;
 org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 </%import>
 <%if (onlineRegions != null && onlineRegions.size() > 0) %>
 
@@ -29,7 +29,7 @@ org.apache.hadoop.hbase.ServerName;
 org.apache.hadoop.hbase.HBaseConfiguration;
 org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
 org.apache.hadoop.util.StringUtils;
 com.yammer.metrics.stats.Snapshot;
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.codec.BaseDecoder;
 import org.apache.hadoop.hbase.codec.BaseEncoder;
 import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
 
 import com.google.protobuf.ByteString;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -47,7 +47,7 @@ public class MessageCodec implements Codec {
     @Override
     public void write(Cell cell) throws IOException {
       checkFlushed();
-      HBaseProtos.Cell.Builder builder = HBaseProtos.Cell.newBuilder();
+      CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder();
       // This copies bytes from Cell to ByteString. I don't see any way around the copy.
       // ByteString is final.
       builder.setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(),
@@ -57,10 +57,10 @@ public class MessageCodec implements Codec {
       builder.setQualifier(ByteString.copyFrom(cell.getQualifierArray(), cell.getQualifierOffset(),
         cell.getQualifierLength()));
       builder.setTimestamp(cell.getTimestamp());
-      builder.setCellType(HBaseProtos.CellType.valueOf(cell.getTypeByte()));
+      builder.setCellType(CellProtos.CellType.valueOf(cell.getTypeByte()));
       builder.setValue(ByteString.copyFrom(cell.getValueArray(), cell.getValueOffset(),
         cell.getValueLength()));
-      HBaseProtos.Cell pbcell = builder.build();
+      CellProtos.Cell pbcell = builder.build();
       pbcell.writeDelimitedTo(this.out);
     }
   }
@@ -71,7 +71,7 @@ public class MessageCodec implements Codec {
     }
 
     protected Cell parseCell() throws IOException {
-      HBaseProtos.Cell pbcell = HBaseProtos.Cell.parseDelimitedFrom(this.in);
+      CellProtos.Cell pbcell = CellProtos.Cell.parseDelimitedFrom(this.in);
       return CellUtil.createCell(pbcell.getRow().toByteArray(),
         pbcell.getFamily().toByteArray(), pbcell.getQualifier().toByteArray(),
         pbcell.getTimestamp(), (byte)pbcell.getCellType().getNumber(),
@@ -102,7 +102,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -1244,7 +1244,7 @@ MasterServices, Server {
   public RegionServerReportResponse regionServerReport(
       RpcController controller, RegionServerReportRequest request) throws ServiceException {
     try {
-      HBaseProtos.ServerLoad sl = request.getLoad();
+      ClusterStatusProtos.ServerLoad sl = request.getLoad();
       this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
       if (sl != null && this.metricsMaster != null) {
         // Up our metrics.
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
-import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
 import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
 import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
@@ -170,10 +170,10 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
@@ -978,7 +978,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
 
   void tryRegionServerReport(long reportStartTime, long reportEndTime)
       throws IOException {
-    HBaseProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
+    ClusterStatusProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
     try {
       RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
       ServerName sn = ServerName.parseVersionedServerName(
@@ -1000,7 +1000,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
     }
   }
 
-  HBaseProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
+  ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
     // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
     // per second, and other metrics. As long as metrics are part of ServerLoad it's best to use
     // the wrapper to compute those numbers in one place.
@@ -1013,7 +1013,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
     MemoryUsage memory =
       ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
 
-    HBaseProtos.ServerLoad.Builder serverLoad = HBaseProtos.ServerLoad.newBuilder();
+    ClusterStatusProtos.ServerLoad.Builder serverLoad =
+      ClusterStatusProtos.ServerLoad.newBuilder();
     serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond());
     serverLoad.setTotalNumberOfRequests((int) regionServerWrapper.getTotalRequestCount());
     serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024));
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase;
 
 import static org.junit.Assert.*;
 
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -56,7 +57,7 @@ public class TestServerLoad {
     assertTrue(slToString.contains("coprocessors=[]"));
   }
 
-  private HBaseProtos.ServerLoad createServerLoadProto() {
+  private ClusterStatusProtos.ServerLoad createServerLoadProto() {
     HBaseProtos.RegionSpecifier rSpecOne =
         HBaseProtos.RegionSpecifier.newBuilder()
             .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
@@ -66,17 +67,18 @@ public class TestServerLoad {
             .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
             .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
 
-    HBaseProtos.RegionLoad rlOne =
-        HBaseProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
+    ClusterStatusProtos.RegionLoad rlOne =
+        ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
             .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
             .setStorefileIndexSizeMB(42).setRootIndexSizeKB(201).build();
-    HBaseProtos.RegionLoad rlTwo =
-        HBaseProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
+    ClusterStatusProtos.RegionLoad rlTwo =
+        ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
             .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
             .setStorefileIndexSizeMB(40).setRootIndexSizeKB(303).build();
 
-    HBaseProtos.ServerLoad sl =
-        HBaseProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).addRegionLoads(rlTwo).build();
+    ClusterStatusProtos.ServerLoad sl =
+        ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).
+        addRegionLoads(rlTwo).build();
     return sl;
   }
 
@@ -18,23 +18,17 @@
  */
 package org.apache.hadoop.hbase.filter;
 
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.assertTrue;
-
 /**
  * Test for the ColumnPaginationFilter, used mainly to test the successful serialization of the filter.
  * More test functionality can be found within {@link org.apache.hadoop.hbase.filter.TestFilter#testColumnPaginationFilter()}
@@ -65,7 +59,7 @@ public class TestColumnPaginationFilter
   }
 
   private Filter serializationTest(Filter filter) throws Exception {
-    HBaseProtos.Filter filterProto = ProtobufUtil.toFilter(filter);
+    FilterProtos.Filter filterProto = ProtobufUtil.toFilter(filter);
     Filter newFilter = ProtobufUtil.toFilter(filterProto);
 
     return newFilter;
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
@@ -72,7 +72,7 @@ public class TestMasterMetrics {
     HRegionServer rs = cluster.getRegionServer(0);
     request.setServer(ProtobufUtil.toServerName(rs.getServerName()));
 
-    HBaseProtos.ServerLoad sl = HBaseProtos.ServerLoad.newBuilder()
+    ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
         .setTotalNumberOfRequests(10000)
         .build();
     master.getMetrics().getMetricsSource().init();