From f7c1418d4bb887b305db9bb22dfbb1d3feeebafa Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Thu, 3 May 2012 06:20:26 +0000 Subject: [PATCH] HBASE-5444 Add PB-based calls to HMasterRegionInterface git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1333319 13f79535-47bb-0310-9956-ffa450edef68 --- .../hbase/tmpl/master/MasterStatusTmpl.jamon | 13 +- .../tmpl/regionserver/RSStatusTmpl.jamon | 5 +- .../apache/hadoop/hbase/ClusterStatus.java | 45 +- .../org/apache/hadoop/hbase/ServerLoad.java | 155 + .../hadoop/hbase/ipc/HBaseRpcMetrics.java | 3 +- .../apache/hadoop/hbase/ipc/HBaseServer.java | 5 + .../hbase/ipc/HMasterRegionInterface.java | 78 - .../apache/hadoop/hbase/ipc/Invocation.java | 5 + .../hbase/ipc/RegionServerStatusProtocol.java | 39 + .../hbase/master/AssignmentManager.java | 5 +- .../apache/hadoop/hbase/master/HMaster.java | 106 +- .../apache/hadoop/hbase/master/MXBean.java | 4 +- .../hadoop/hbase/master/MXBeanImpl.java | 8 +- .../hbase/master/MasterDumpServlet.java | 6 +- .../hadoop/hbase/master/ServerManager.java | 31 +- .../hadoop/hbase/protobuf/ProtobufUtil.java | 14 + .../hbase/protobuf/generated/HBaseProtos.java | 3280 +++++++++++++++- .../generated/RegionServerStatusProtos.java | 3427 +++++++++++++++++ .../hbase/regionserver/HRegionServer.java | 145 +- src/main/protobuf/RegionServerStatus.proto | 85 + src/main/protobuf/hbase.proto | 83 + .../resources/hbase-webapps/master/table.jsp | 18 +- .../apache/hadoop/hbase/MiniHBaseCluster.java | 4 +- .../hbase/coprocessor/TestClassLoading.java | 26 +- .../hbase/master/TestAssignmentManager.java | 8 +- .../hadoop/hbase/master/TestMXBean.java | 6 +- .../hbase/master/TestMasterNoCluster.java | 36 +- .../TestServerCustomProtocol.java | 1 - 28 files changed, 7387 insertions(+), 254 deletions(-) create mode 100644 src/main/java/org/apache/hadoop/hbase/ServerLoad.java delete mode 100644 src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java create mode 100644 src/main/java/org/apache/hadoop/hbase/ipc/RegionServerStatusProtocol.java create mode 100644 src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java create mode 100644 src/main/protobuf/RegionServerStatus.proto diff --git a/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 69434f7dd86..6bc2cf2b0f0 100644 --- a/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -37,7 +37,7 @@ org.apache.hadoop.hbase.util.JvmVersion; org.apache.hadoop.hbase.util.FSUtils; org.apache.hadoop.hbase.master.HMaster; org.apache.hadoop.hbase.HConstants; -org.apache.hadoop.hbase.HServerLoad; +org.apache.hadoop.hbase.ServerLoad; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.client.HBaseAdmin; org.apache.hadoop.hbase.client.HConnectionManager; @@ -213,11 +213,12 @@ org.apache.hadoop.hbase.HBaseConfiguration; // or be set to 0 to get ephemeral ports int infoPort = master.getConfiguration().getInt("hbase.regionserver.info.port", 60030); String url = "http://" + serverName.getHostname() + ":" + infoPort + "/"; - HServerLoad hsl = master.getServerManager().getLoad(serverName); - String loadStr = hsl == null? "-": hsl.toString(); - if (hsl != null) { - totalRegions += hsl.getNumberOfRegions(); - totalRequests += hsl.getNumberOfRequests(); + ServerLoad sl = master.getServerManager().getLoad(serverName); + String loadStr = sl == null? 
"-": sl.toString(); + if (sl != null) { + totalRegions += sl.getRegionLoadsCount(); + // Is this correct? Adding a rate to a measure. + totalRequests += sl.getRequestsPerSecond(); } long startcode = serverName.getStartcode(); diff --git a/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon index 3c7c091d868..87f04f45961 100644 --- a/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon +++ b/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon @@ -32,7 +32,8 @@ org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics; org.apache.hadoop.hbase.util.Bytes; org.apache.hadoop.hbase.HConstants; org.apache.hadoop.hbase.HServerInfo; -org.apache.hadoop.hbase.HServerLoad; +org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad; +org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad; org.apache.hadoop.hbase.HRegionInfo; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.HBaseConfiguration; @@ -118,7 +119,7 @@ String url = "http://" + host + "/"; <%for HRegionInfo r: onlineRegions %> <%java> - HServerLoad.RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); + RegionLoad load = regionServer.createRegionLoad(r.getEncodedName()); <% r.getRegionNameAsString() %> <% Bytes.toStringBinary(r.getStartKey()) %><% Bytes.toStringBinary(r.getEndKey()) %> diff --git a/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index 5d7f07bf641..ccbb9ab90d8 100644 --- a/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -29,10 +29,15 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.TreeMap; - +import java.util.HashSet; +import org.apache.hadoop.hbase.HServerLoad; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HServerLoad.RegionLoad; import org.apache.hadoop.hbase.master.AssignmentManager.RegionState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.VersionMismatchException; @@ -89,14 +94,48 @@ public class ClusterStatus extends VersionedWritable { } public ClusterStatus(final String hbaseVersion, final String clusterid, - final Map servers, + final Map servers, final Collection deadServers, final ServerName master, final Collection backupMasters, final Map rit, final String[] masterCoprocessors) { this.hbaseVersion = hbaseVersion; - this.liveServers = servers; + + // TODO: This conversion of ServerLoad to HServerLoad is temporary, + // will be cleaned up in HBASE-5445. Using the ClusterStatus proto brings + // in a lot of other changes, so it makes sense to break this up. 
diff --git a/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 5d7f07bf641..ccbb9ab90d8 100644
--- a/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -29,10 +29,15 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
-
+import java.util.HashSet;
+import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HServerLoad.RegionLoad;
 import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.VersionMismatchException;
@@ -89,14 +94,48 @@ public class ClusterStatus extends VersionedWritable {
   }

   public ClusterStatus(final String hbaseVersion, final String clusterid,
-      final Map<ServerName, HServerLoad> servers,
+      final Map<ServerName, ServerLoad> servers,
       final Collection<ServerName> deadServers,
       final ServerName master,
       final Collection<ServerName> backupMasters,
       final Map<String, RegionState> rit,
       final String[] masterCoprocessors) {
     this.hbaseVersion = hbaseVersion;
-    this.liveServers = servers;
+
+    // TODO: This conversion of ServerLoad to HServerLoad is temporary and
+    // will be cleaned up in HBASE-5445. Using the ClusterStatus proto brings
+    // in a lot of other changes, so it makes sense to break this up.
+    Map<ServerName, HServerLoad> convertedLoad =
+      new HashMap<ServerName, HServerLoad>();
+    for (Map.Entry<ServerName, ServerLoad> entry : servers.entrySet()) {
+      ServerLoad sl = entry.getValue();
+
+      Map<byte[], RegionLoad> regionLoad = new HashMap<byte[], RegionLoad>();
+      for (HBaseProtos.RegionLoad rl : sl.getRegionLoadsList()) {
+        Set<String> regionCoprocessors = new HashSet<String>();
+        for (HBaseProtos.Coprocessor coprocessor
+            : rl.getCoprocessorsList()) {
+          regionCoprocessors.add(coprocessor.getName());
+        }
+
+        byte [] regionName = rl.getRegionSpecifier().getValue().toByteArray();
+        RegionLoad converted = new RegionLoad(regionName,
+          rl.getStores(), rl.getStorefiles(), rl.getStoreUncompressedSizeMB(),
+          rl.getStorefileSizeMB(), rl.getMemstoreSizeMB(),
+          rl.getStorefileIndexSizeMB(), rl.getRootIndexSizeKB(),
+          rl.getTotalStaticIndexSizeKB(), rl.getTotalStaticBloomSizeKB(),
+          rl.getReadRequestsCount(), rl.getWriteRequestsCount(),
+          rl.getTotalCompactingKVs(), rl.getCurrentCompactedKVs(),
+          regionCoprocessors);
+        regionLoad.put(regionName, converted);
+      }
+
+      HServerLoad hsl = new HServerLoad(sl.getTotalNumberOfRequests(),
+        sl.getRequestsPerSecond(), sl.getUsedHeapMB(), sl.getMaxHeapMB(),
+        regionLoad, new HashSet<String>(Arrays.asList(masterCoprocessors)));
+      convertedLoad.put(entry.getKey(), hsl);
+    }
+    this.liveServers = convertedLoad;
     this.deadServers = deadServers;
     this.master = master;
     this.backupMasters = backupMasters;
diff --git a/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
new file mode 100644
index 00000000000..912f5199ec2
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -0,0 +1,155 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This class is used for exporting the current state of load on a RegionServer.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ServerLoad {
+  public ServerLoad(HBaseProtos.ServerLoad serverLoad) {
+    this.serverLoad = serverLoad;
+  }
+
+  /* @return the underlying ServerLoad protobuf object */
+  public HBaseProtos.ServerLoad getServerLoadPB() {
+    return serverLoad;
+  }
+
+  protected HBaseProtos.ServerLoad serverLoad;
+
+  /* @return number of requests per second since the last report. */
+  public int getRequestsPerSecond() {
+    return serverLoad.getRequestsPerSecond();
+  }
+  public boolean hasRequestsPerSecond() {
+    return serverLoad.hasRequestsPerSecond();
+  }
+
+  /* @return total number of requests since the start of the region server. */
+  public int getTotalNumberOfRequests() {
+    return serverLoad.getTotalNumberOfRequests();
+  }
+  public boolean hasTotalNumberOfRequests() {
+    return serverLoad.hasTotalNumberOfRequests();
+  }
+
+  /* @return the amount of used heap, in MB. */
+  public int getUsedHeapMB() {
+    return serverLoad.getUsedHeapMB();
+  }
+  public boolean hasUsedHeapMB() {
+    return serverLoad.hasUsedHeapMB();
+  }
+
+  /* @return the maximum allowable size of the heap, in MB. */
+  public int getMaxHeapMB() {
+    return serverLoad.getMaxHeapMB();
+  }
+  public boolean hasMaxHeapMB() {
+    return serverLoad.hasMaxHeapMB();
+  }
+
+  /* Returns a list of RegionLoads, which contain information on the load of individual regions. */
+  public List<RegionLoad> getRegionLoadsList() {
+    return serverLoad.getRegionLoadsList();
+  }
+  public RegionLoad getRegionLoads(int index) {
+    return serverLoad.getRegionLoads(index);
+  }
+  public int getRegionLoadsCount() {
+    return serverLoad.getRegionLoadsCount();
+  }
+
+  /**
+   * @return the list of RegionServer-level coprocessors, e.g., WALObserver implementations.
+   * Region-level coprocessors, on the other hand, are stored inside the RegionLoad objects.
+   */
+  public List<Coprocessor> getCoprocessorsList() {
+    return serverLoad.getCoprocessorsList();
+  }
+  public Coprocessor getCoprocessors(int index) {
+    return serverLoad.getCoprocessors(index);
+  }
+  public int getCoprocessorsCount() {
+    return serverLoad.getCoprocessorsCount();
+  }
+
+  /**
+   * Return the RegionServer-level coprocessors from a ServerLoad pb.
+   * @param sl - ServerLoad
+   * @return string array of loaded RegionServer-level coprocessors
+   */
+  public static String[] getRegionServerCoprocessors(ServerLoad sl) {
+    if (sl == null) {
+      return null;
+    }
+
+    List<Coprocessor> list = sl.getCoprocessorsList();
+    String [] ret = new String[list.size()];
+    int i = 0;
+    for (Coprocessor elem : list) {
+      ret[i++] = elem.getName();
+    }
+
+    return ret;
+  }
+
+  /**
+   * Return the RegionServer-level and Region-level coprocessors
+   * from a ServerLoad pb.
+   * @param sl - ServerLoad
+   * @return string array of loaded RegionServer-level and
+   *         Region-level coprocessors
+   */
+  public static String[] getAllCoprocessors(ServerLoad sl) {
+    if (sl == null) {
+      return null;
+    }
+
+    // Need a set to remove duplicates, but since the generated Coprocessor
+    // class is not Comparable, make it a Set<String> instead of Set<Coprocessor>.
+    TreeSet<String> coprocessSet = new TreeSet<String>();
+    for (Coprocessor coprocessor : sl.getCoprocessorsList()) {
+      coprocessSet.add(coprocessor.getName());
+    }
+    for (RegionLoad rl : sl.getRegionLoadsList()) {
+      for (Coprocessor coprocessor : rl.getCoprocessorsList()) {
+        coprocessSet.add(coprocessor.getName());
+      }
+    }
+
+    return coprocessSet.toArray(new String[0]);
+  }
+
+  public static final ServerLoad EMPTY_SERVERLOAD =
+    new ServerLoad(HBaseProtos.ServerLoad.newBuilder().build());
+}
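[Editor's note: a minimal sketch of the new wrapper in use. The generated builder setter names (setRequestsPerSecond and friends) are assumed to mirror the getters above; everything else is taken from the new ServerLoad class itself.]

    import org.apache.hadoop.hbase.ServerLoad;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class ServerLoadExample {
      public static void main(String[] args) {
        // Build the underlying protobuf message, then wrap it in the
        // ServerLoad accessor class added by this patch.
        HBaseProtos.ServerLoad pb = HBaseProtos.ServerLoad.newBuilder()
          .setRequestsPerSecond(100)
          .setTotalNumberOfRequests(5000)
          .setUsedHeapMB(512)
          .setMaxHeapMB(1024)
          .build();
        ServerLoad sl = new ServerLoad(pb);
        System.out.println(sl.getUsedHeapMB() + "/" + sl.getMaxHeapMB() + " MB heap");
        // No coprocessors were set, so this prints 0.
        System.out.println(ServerLoad.getAllCoprocessors(sl).length);
      }
    }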
diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
index 0db27604706..fc9176ddca1 100644
--- a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
+++ b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.metrics.MetricsRecord;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.Updater;
 import org.apache.hadoop.metrics.util.*;
+import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol;

 import java.lang.reflect.Method;

@@ -64,7 +65,7 @@ public class HBaseRpcMetrics implements Updater {
     context.registerUpdater(this);

     initMethods(HMasterInterface.class);
-    initMethods(HMasterRegionInterface.class);
+    initMethods(RegionServerStatusProtocol.class);
     initMethods(HRegionInterface.class);
     rpcStatistics = new HBaseRPCStatistics(this.registry, hostName, port);
   }
diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
index 973c7cbeec5..b0a2731d257 100644
--- a/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
+++ b/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
@@ -43,6 +43,7 @@ import java.nio.channels.SocketChannel;
 import java.nio.channels.WritableByteChannel;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -133,6 +134,10 @@ public abstract class HBaseServer implements RpcServer {
       new ThreadLocal<RpcServer>();
   private volatile boolean started = false;

+  // For generated protocol classes which don't have a VERSION field.
+  private static final Map<Class<?>, Long>
+    PROTOCOL_VERSION = new HashMap<Class<?>, Long>();
+
   private static final Map<String, Class<? extends VersionedProtocol>>
     PROTOCOL_CACHE =
     new ConcurrentHashMap<String, Class<? extends VersionedProtocol>>();
diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java b/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
deleted file mode 100644
index fd9783016ef..00000000000
--- a/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.ipc; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HServerLoad; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.io.MapWritable; -import org.apache.hadoop.hbase.ipc.VersionedProtocol; - -/** - * The Master publishes this Interface for RegionServers to register themselves - * on. - */ -@KerberosInfo( - serverPrincipal = "hbase.master.kerberos.principal", - clientPrincipal = "hbase.regionserver.kerberos.principal") -@InterfaceAudience.Private -public interface HMasterRegionInterface extends VersionedProtocol { - /** - * This Interfaces' version. Version changes when the Interface changes. - */ - // All HBase Interfaces used derive from HBaseRPCProtocolVersion. It - // maintained a single global version number on all HBase Interfaces. This - // meant all HBase RPC was broke though only one of the three RPC Interfaces - // had changed. This has since been undone. - public static final long VERSION = 29L; - - /** - * Called when a region server first starts. - * @param port Port number this regionserver is up on. - * @param serverStartcode This servers' startcode. - * @param serverCurrentTime The current time of the region server in ms - * @throws IOException e - * @return Configuration for the regionserver to use: e.g. filesystem, - * hbase rootdir, the hostname to use creating the RegionServer ServerName, - * etc. - */ - public MapWritable regionServerStartup(final int port, - final long serverStartcode, final long serverCurrentTime) - throws IOException; - - /** - * @param sn {@link ServerName#getVersionedBytes()} - * @param hsl Server load. - * @throws IOException - */ - public void regionServerReport(byte [] sn, HServerLoad hsl) - throws IOException; - - /** - * Called by a region server to report a fatal error that is causing - * it to abort. 
- * @param sn {@link ServerName#getVersionedBytes()} - * @param errorMessage informative text to expose in the master logs and UI - */ - public void reportRSFatalError(byte [] sn, String errorMessage); -} diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java b/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java index bb6ab3b181d..70e9bc17c7f 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java @@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.io.HbaseObjectWritable; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; +import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol; import org.apache.hadoop.io.VersionMismatchException; import org.apache.hadoop.io.VersionedWritable; @@ -61,6 +63,8 @@ public class Invocation extends VersionedWritable implements Configurable { Long.valueOf(ClientProtocol.VERSION)); PROTOCOL_VERSION.put(AdminService.BlockingInterface.class, Long.valueOf(AdminProtocol.VERSION)); + PROTOCOL_VERSION.put(RegionServerStatusService.BlockingInterface.class, + Long.valueOf(RegionServerStatusProtocol.VERSION)); } // For protobuf protocols, which use ServiceException, instead of IOException @@ -70,6 +74,7 @@ public class Invocation extends VersionedWritable implements Configurable { static { PROTOBUF_PROTOCOLS.add(ClientProtocol.class); PROTOBUF_PROTOCOLS.add(AdminProtocol.class); + PROTOBUF_PROTOCOLS.add(RegionServerStatusProtocol.class); } private static byte RPC_VERSION = 1; diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerStatusProtocol.java b/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerStatusProtocol.java new file mode 100644 index 00000000000..87b9090a266 --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerStatusProtocol.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.ipc.VersionedProtocol; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; +import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.security.KerberosInfo; + +/** + * Protocol that a RegionServer uses to communicate its status to the Master. 
+ */
+@KerberosInfo(
+    serverPrincipal = "hbase.master.kerberos.principal")
+@TokenInfo("HBASE_AUTH_TOKEN")
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface RegionServerStatusProtocol extends
+  RegionServerStatusService.BlockingInterface, VersionedProtocol {
+  public static final long VERSION = 1L;
+}
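[Editor's note: a hedged sketch of a caller-side fatal-error report over this protocol. `master` is assumed to be an already-obtained RegionServerStatusProtocol proxy, and the setHostName/setPort/setStartCode builder names on the ServerName message are assumptions; the request accessors match the HMaster implementation later in this patch.]

    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;

    public class FatalErrorReportExample {
      static void reportAbort(RegionServerStatusProtocol master) throws ServiceException {
        HBaseProtos.ServerName sn = HBaseProtos.ServerName.newBuilder()
          .setHostName("rs1.example.com")   // assumed field names on the
          .setPort(60020)                   // ServerName message
          .setStartCode(1336000000000L)
          .build();
        ReportRSFatalErrorRequest request = ReportRSFatalErrorRequest.newBuilder()
          .setServer(sn)
          .setErrorMessage("ABORTING region server: WAL sync failed")
          .build();
        // The master's implementation ignores the RpcController argument.
        master.reportRSFatalError(null, request);
      }
    }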
diff --git a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index df675bc7dfb..16dc6f4a916 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
 import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
@@ -3171,10 +3172,10 @@ public class AssignmentManager extends ZooKeeperListener {
         }
       }
     }
-    Map<ServerName, HServerLoad> onlineSvrs = this.serverManager.getOnlineServers();
+    Map<ServerName, ServerLoad> onlineSvrs = this.serverManager.getOnlineServers();
     // Take care of servers w/o assignments.
     for (Map<ServerName, List<HRegionInfo>> map : result.values()) {
-      for (Map.Entry<ServerName, HServerLoad> svrEntry: onlineSvrs.entrySet()) {
+      for (Map.Entry<ServerName, ServerLoad> svrEntry: onlineSvrs.entrySet()) {
         if (!map.containsKey(svrEntry.getKey())) {
           map.put(svrEntry.getKey(), new ArrayList<HRegionInfo>());
         }
diff --git a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 353d564aafc..ccc7119638e 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -77,7 +77,8 @@ import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
 import org.apache.hadoop.hbase.ipc.HBaseServer;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
-import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
+import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.ipc.ProtocolSignature;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
@@ -112,12 +113,21 @@ import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.io.MapWritable;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics.util.MBeanUtil;
 import org.apache.hadoop.net.DNS;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.Watcher;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
+import com.google.protobuf.ServiceException;

 /**
  * HMaster is the "master server" for HBase. An HBase cluster has one active
  * master.
  *

  * You can also shutdown just this master. Call {@link #stopMaster()}.
  *
  * @see HMasterInterface
- * @see HMasterRegionInterface
+ * @see RegionServerStatusProtocol
  * @see Watcher
  */
@InterfaceAudience.Private
public class HMaster extends HasThread
-implements HMasterInterface, HMasterRegionInterface, MasterServices,
+implements HMasterInterface, RegionServerStatusProtocol, MasterServices,
Server {
   private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
@@ -262,7 +272,7 @@ Server {
     int numHandlers = conf.getInt("hbase.master.handler.count",
       conf.getInt("hbase.regionserver.handler.count", 25));
     this.rpcServer = HBaseRPC.getServer(this,
-      new Class<?>[]{HMasterInterface.class, HMasterRegionInterface.class},
+      new Class<?>[]{HMasterInterface.class, RegionServerStatusProtocol.class},
       initialIsa.getHostName(), // BindAddress is IP we got for this server.
       initialIsa.getPort(),
       numHandlers,
@@ -564,7 +574,7 @@ Server {
       // Not registered; add it.
       LOG.info("Registering server found up in zk but who has not yet " +
         "reported in: " + sn);
-      this.serverManager.recordNewServer(sn, HServerLoad.EMPTY_HSERVERLOAD);
+      this.serverManager.recordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD);
     }
   }
@@ -795,8 +805,8 @@ Server {
   throws IOException {
     if (HMasterInterface.class.getName().equals(protocol)) {
       return new ProtocolSignature(HMasterInterface.VERSION, null);
-    } else if (HMasterRegionInterface.class.getName().equals(protocol)) {
-      return new ProtocolSignature(HMasterRegionInterface.VERSION, null);
+    } else if (RegionServerStatusProtocol.class.getName().equals(protocol)) {
+      return new ProtocolSignature(RegionServerStatusProtocol.VERSION, null);
     }
     throw new IOException("Unknown protocol: " + protocol);
   }
@@ -804,8 +814,8 @@ Server {
   public long getProtocolVersion(String protocol, long clientVersion) {
     if (HMasterInterface.class.getName().equals(protocol)) {
       return HMasterInterface.VERSION;
-    } else if (HMasterRegionInterface.class.getName().equals(protocol)) {
-      return HMasterRegionInterface.VERSION;
+    } else if (RegionServerStatusProtocol.class.getName().equals(protocol)) {
+      return RegionServerStatusProtocol.VERSION;
     }
     // unknown protocol
     LOG.warn("Version requested for unimplemented protocol: "+protocol);
@@ -952,18 +962,25 @@ Server {
   }

   @Override
-  public MapWritable regionServerStartup(final int port,
-    final long serverStartCode, final long serverCurrentTime)
-  throws IOException {
+  public RegionServerStartupResponse regionServerStartup(
+      RpcController controller, RegionServerStartupRequest request) throws ServiceException {
     // Register with server manager
-    InetAddress ia = getRemoteInetAddress(port, serverStartCode);
-    ServerName rs = this.serverManager.regionServerStartup(ia, port,
-      serverStartCode, serverCurrentTime);
-    // Send back some config info
-    MapWritable mw = createConfigurationSubset();
-    mw.put(new Text(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER),
-      new Text(rs.getHostname()));
-    return mw;
+    try {
+      InetAddress ia = getRemoteInetAddress(request.getPort(), request.getServerStartCode());
+      ServerName rs = this.serverManager.regionServerStartup(ia, request.getPort(),
+        request.getServerStartCode(), request.getServerCurrentTime());
+
+      // Send back some config info
+      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
+      NameStringPair.Builder entry = NameStringPair.newBuilder()
+        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)
+        .setValue(rs.getHostname());
+      resp.addMapEntries(entry.build());
+
+      return resp.build();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
   }
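[Editor's note: a minimal sketch of this startup handshake from the RegionServer side, assuming `master` is a RegionServerStatusProtocol proxy already obtained over RPC. The request and response accessors mirror the builder calls in the method above.]

    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;

    public class StartupHandshakeExample {
      static void register(RegionServerStatusProtocol master) throws ServiceException {
        RegionServerStartupRequest request = RegionServerStartupRequest.newBuilder()
          .setPort(60020)
          .setServerStartCode(System.currentTimeMillis())
          .setServerCurrentTime(System.currentTimeMillis())
          .build();
        RegionServerStartupResponse response = master.regionServerStartup(null, request);
        // The response carries the config subset (hbase.rootdir, fs.default.name)
        // plus the hostname the master saw for this RegionServer.
        for (NameStringPair entry : response.getMapEntriesList()) {
          System.out.println(entry.getName() + "=" + entry.getValue());
        }
      }
    }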
   /**
@@ -981,32 +998,49 @@ Server {
    * @return Subset of configuration to pass initializing regionservers: e.g.
    * the filesystem to use and root directory to use.
    */
-  protected MapWritable createConfigurationSubset() {
-    MapWritable mw = addConfig(new MapWritable(), HConstants.HBASE_DIR);
-    return addConfig(mw, "fs.default.name");
+  protected RegionServerStartupResponse.Builder createConfigurationSubset() {
+    RegionServerStartupResponse.Builder resp = addConfig(
+      RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
+    return addConfig(resp, "fs.default.name");
   }

-  private MapWritable addConfig(final MapWritable mw, final String key) {
-    mw.put(new Text(key), new Text(this.conf.get(key)));
-    return mw;
+  private RegionServerStartupResponse.Builder addConfig(
+      final RegionServerStartupResponse.Builder resp, final String key) {
+    NameStringPair.Builder entry = NameStringPair.newBuilder()
+      .setName(key)
+      .setValue(this.conf.get(key));
+    resp.addMapEntries(entry.build());
+    return resp;
   }

   @Override
-  public void regionServerReport(final byte [] sn, final HServerLoad hsl)
-  throws IOException {
-    this.serverManager.regionServerReport(ServerName.parseVersionedServerName(sn), hsl);
-    if (hsl != null && this.metrics != null) {
-      // Up our metrics.
-      this.metrics.incrementRequests(hsl.getTotalNumberOfRequests());
+  public RegionServerReportResponse regionServerReport(
+      RpcController controller, RegionServerReportRequest request) throws ServiceException {
+    try {
+      HBaseProtos.ServerLoad sl = request.getLoad();
+      this.serverManager.regionServerReport(
+        ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
+      if (sl != null && this.metrics != null) {
+        // Up our metrics.
+        this.metrics.incrementRequests(sl.getTotalNumberOfRequests());
+      }
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
     }
+
+    return RegionServerReportResponse.newBuilder().build();
   }

   @Override
-  public void reportRSFatalError(byte [] sn, String errorText) {
-    String msg = "Region server " + Bytes.toString(sn) +
+  public ReportRSFatalErrorResponse reportRSFatalError(
+      RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException {
+    String errorText = request.getErrorMessage();
+    ServerName sn = ProtobufUtil.toServerName(request.getServer());
+    String msg = "Region server " + Bytes.toString(sn.getVersionedBytes()) +
       " reported a fatal error:\n" + errorText;
     LOG.error(msg);
     rsFatals.add(msg);
+
+    return ReportRSFatalErrorResponse.newBuilder().build();
   }

   public boolean isMasterRunning() {
diff --git a/src/main/java/org/apache/hadoop/hbase/master/MXBean.java b/src/main/java/org/apache/hadoop/hbase/master/MXBean.java
index 7f44dc2db5e..535bf95dd91 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/MXBean.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/MXBean.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master;
 import java.util.Map;

 import org.apache.hadoop.classification.InterfaceStability.Evolving;
-import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.ServerLoad;

 /**
  * This is the JMX management interface for Hbase master information
@@ -101,7 +101,7 @@
    * Get the live region servers
    * @return Live region servers
    */
-  public Map<String, HServerLoad> getRegionServers();
+  public Map<String, ServerLoad> getRegionServers();

   /**
    * Get the dead region servers
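[Editor's note: a hedged sketch of reading the live-server map through this MXBean over JMX. The ObjectName string below is illustrative only; the real name depends on how the master registers the bean via MBeanUtil.]

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class MXBeanReadExample {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Assumed object name for illustration.
        ObjectName name = new ObjectName("hadoop:service=Master,name=Master");
        Object servers = mbs.getAttribute(name, "RegionServers");
        System.out.println(servers);
      }
    }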
diff --git a/src/main/java/org/apache/hadoop/hbase/master/MXBeanImpl.java b/src/main/java/org/apache/hadoop/hbase/master/MXBeanImpl.java
index 45b8fe72273..2b3e21a26b1 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/MXBeanImpl.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/MXBeanImpl.java
@@ -23,7 +23,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;

-import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;

@@ -77,9 +77,9 @@ public class MXBeanImpl implements MXBean {
   }

   @Override
-  public Map<String, HServerLoad> getRegionServers() {
-    Map<String, HServerLoad> data = new HashMap<String, HServerLoad>();
-    for (final Entry<ServerName, HServerLoad> entry :
+  public Map<String, ServerLoad> getRegionServers() {
+    Map<String, ServerLoad> data = new HashMap<String, ServerLoad>();
+    for (final Entry<ServerName, ServerLoad> entry :
       master.getServerManager().getOnlineServers().entrySet()) {
       data.put(entry.getKey().getServerName(),
         entry.getValue());
diff --git a/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java b/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
index be6383873f3..31440cd2d79 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
@@ -32,8 +32,8 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.HServerLoad;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
 import org.apache.hadoop.hbase.monitoring.LogMonitoring;
 import org.apache.hadoop.hbase.monitoring.StateDumpServlet;
@@ -114,9 +114,9 @@ public class MasterDumpServlet extends StateDumpServlet {
   }

   private void dumpServers(HMaster master, PrintWriter out) {
-    Map<ServerName, HServerLoad> servers =
+    Map<ServerName, ServerLoad> servers =
       master.getServerManager().getOnlineServers();
-    for (Map.Entry<ServerName, HServerLoad> e : servers.entrySet()) {
+    for (Map.Entry<ServerName, ServerLoad> e : servers.entrySet()) {
       out.println(e.getKey() + ": " + e.getValue());
     }
   }
diff --git a/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 80271b188d1..6ba8ab02357 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -38,7 +38,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
@@ -51,7 +53,6 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;

 /**
@@ -75,8 +76,8 @@ public class ServerManager {
   private volatile boolean clusterShutdown = false;

   /** Map of registered servers to their current load */
-  private final Map<ServerName, HServerLoad> onlineServers =
-    new ConcurrentHashMap<ServerName, HServerLoad>();
+  private final Map<ServerName, ServerLoad> onlineServers =
+    new ConcurrentHashMap<ServerName, ServerLoad>();

   // TODO: This is strange to have two maps but HSI above is used on both sides
   /**
@@ -154,11 +155,11 @@ public class ServerManager {
     checkClockSkew(sn, serverCurrentTime);
     checkIsDead(sn, "STARTUP");
     checkAlreadySameHostPort(sn);
-    recordNewServer(sn, HServerLoad.EMPTY_HSERVERLOAD);
+    recordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD);
     return sn;
   }

-  void regionServerReport(ServerName sn, HServerLoad hsl)
+  void regionServerReport(ServerName sn, ServerLoad sl)
   throws YouAreDeadException, PleaseHoldException {
     checkIsDead(sn, "REPORT");
     if (!this.onlineServers.containsKey(sn)) {
@@ -169,9 +170,9 @@ public class ServerManager {
       // The only thing we are skipping is passing back to the regionserver
       // the ServerName to use. Here we presume a master has already done
       // that so we'll press on with whatever it gave us for ServerName.
-      recordNewServer(sn, hsl);
+      recordNewServer(sn, sl);
     } else {
-      this.onlineServers.put(sn, hsl);
+      this.onlineServers.put(sn, sl);
     }
   }
@@ -255,9 +256,9 @@ public class ServerManager {
    * @param hsl
    * @param serverName The remote servers name.
    */
-  void recordNewServer(final ServerName serverName, final HServerLoad hsl) {
+  void recordNewServer(final ServerName serverName, final ServerLoad sl) {
     LOG.info("Registering server=" + serverName);
-    this.onlineServers.put(serverName, hsl);
+    this.onlineServers.put(serverName, sl);
     this.serverConnections.remove(serverName);
   }

   /**
    * @param serverName
    * @return HServerLoad if serverName is known else null
    */
-  public HServerLoad getLoad(final ServerName serverName) {
+  public ServerLoad getLoad(final ServerName serverName) {
     return this.onlineServers.get(serverName);
   }
@@ -274,7 +275,7 @@ public class ServerManager {
    * @return HServerLoad if serverName is known else null
    * @deprecated Use {@link #getLoad(HServerAddress)}
    */
-  public HServerLoad getLoad(final HServerAddress address) {
+  public ServerLoad getLoad(final HServerAddress address) {
     ServerName sn = new ServerName(address.toString(),
       ServerName.NON_STARTCODE);
     ServerName actual =
       ServerName.findServerWithSameHostnamePort(this.getOnlineServersList(), sn);
@@ -291,9 +292,9 @@ public class ServerManager {
     int totalLoad = 0;
     int numServers = 0;
     double averageLoad = 0.0;
-    for (HServerLoad hsl: this.onlineServers.values()) {
+    for (ServerLoad sl: this.onlineServers.values()) {
       numServers++;
-      totalLoad += hsl.getNumberOfRegions();
+      totalLoad += sl.getRegionLoadsCount();
     }
     averageLoad = (double)totalLoad / (double)numServers;
     return averageLoad;
   }
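[Editor's note: worked numbers for the getAverageLoad() arithmetic above; regions are now counted per server via sl.getRegionLoadsCount(). Note the method yields NaN when no servers are online (0.0 / 0.0).]

    // Two online servers reporting 10 and 20 regions respectively:
    int totalLoad = 10 + 20;   // sum of sl.getRegionLoadsCount() per server
    int numServers = 2;
    double averageLoad = (double) totalLoad / (double) numServers;  // 15.0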
@@ -308,7 +309,7 @@ public class ServerManager {
   /**
    * @return Read-only map of servers to serverinfo
    */
-  public Map<ServerName, HServerLoad> getOnlineServers() {
+  public Map<ServerName, ServerLoad> getOnlineServers() {
     // Presumption is that iterating the returned Map is OK.
     synchronized (this.onlineServers) {
       return Collections.unmodifiableMap(this.onlineServers);
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 77bc2d4e33b..212ee3e6ba6 100644
--- a/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -101,6 +101,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -1403,5 +1404,18 @@ public final class ProtobufUtil {
     }
   }

+  /*
+   * Get the total (read + write) requests from a RegionLoad pb
+   * @param rl - RegionLoad pb
+   * @return total (read + write) requests
+   */
+  public static long getTotalRequestsCount(RegionLoad rl) {
+    if (rl == null) {
+      return 0;
+    }
+
+    return rl.getReadRequestsCount() + rl.getWriteRequestsCount();
+  }
+
   // End helpers for Admin
 }
diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 1b4a6312844..ae2094d5a3c 100644
--- a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -1345,6 +1345,2779 @@ public final class HBaseProtos {
     // @@protoc_insertion_point(class_scope:RegionSpecifier)
   }

+  public interface RegionLoadOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .RegionSpecifier regionSpecifier = 1;
+    boolean hasRegionSpecifier();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder();
+
+    // optional uint32 stores = 2;
+    boolean hasStores();
+    int getStores();
+
+    // optional uint32 storefiles = 3;
+    boolean hasStorefiles();
+    int getStorefiles();
+
+    // optional uint32 storeUncompressedSizeMB = 4;
+    boolean hasStoreUncompressedSizeMB();
+    int getStoreUncompressedSizeMB();
+
+    // optional uint32 storefileSizeMB = 5;
+    boolean hasStorefileSizeMB();
+    int getStorefileSizeMB();
+
+    // optional uint32 memstoreSizeMB = 6;
+    boolean hasMemstoreSizeMB();
+    int getMemstoreSizeMB();
+
+    // optional uint32 storefileIndexSizeMB = 7;
+    boolean hasStorefileIndexSizeMB();
+    int getStorefileIndexSizeMB();
+
+    // optional uint64 readRequestsCount = 8;
+    boolean hasReadRequestsCount();
+    long getReadRequestsCount();
+
+    // optional uint64 writeRequestsCount = 9;
+    boolean hasWriteRequestsCount();
+    long getWriteRequestsCount();
+
+    // optional uint64 totalCompactingKVs = 10;
+    boolean hasTotalCompactingKVs();
+    long getTotalCompactingKVs();
+
+    // optional uint64 currentCompactedKVs = 11;
+    boolean hasCurrentCompactedKVs();
+    long getCurrentCompactedKVs();
+
+    // optional uint32 rootIndexSizeKB = 12;
+    boolean hasRootIndexSizeKB();
+    int getRootIndexSizeKB();
+
+    // optional uint32 totalStaticIndexSizeKB = 13;
+    boolean hasTotalStaticIndexSizeKB();
+    int getTotalStaticIndexSizeKB();
+
+    //
optional uint32 totalStaticBloomSizeKB = 14; + boolean hasTotalStaticBloomSizeKB(); + int getTotalStaticBloomSizeKB(); + + // repeated .Coprocessor coprocessors = 15; + java.util.List + getCoprocessorsList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index); + int getCoprocessorsCount(); + java.util.List + getCoprocessorsOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index); + } + public static final class RegionLoad extends + com.google.protobuf.GeneratedMessage + implements RegionLoadOrBuilder { + // Use RegionLoad.newBuilder() to construct. + private RegionLoad(Builder builder) { + super(builder); + } + private RegionLoad(boolean noInit) {} + + private static final RegionLoad defaultInstance; + public static RegionLoad getDefaultInstance() { + return defaultInstance; + } + + public RegionLoad getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionLoad_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionLoad_fieldAccessorTable; + } + + private int bitField0_; + // required .RegionSpecifier regionSpecifier = 1; + public static final int REGIONSPECIFIER_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier_; + public boolean hasRegionSpecifier() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier() { + return regionSpecifier_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder() { + return regionSpecifier_; + } + + // optional uint32 stores = 2; + public static final int STORES_FIELD_NUMBER = 2; + private int stores_; + public boolean hasStores() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getStores() { + return stores_; + } + + // optional uint32 storefiles = 3; + public static final int STOREFILES_FIELD_NUMBER = 3; + private int storefiles_; + public boolean hasStorefiles() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getStorefiles() { + return storefiles_; + } + + // optional uint32 storeUncompressedSizeMB = 4; + public static final int STOREUNCOMPRESSEDSIZEMB_FIELD_NUMBER = 4; + private int storeUncompressedSizeMB_; + public boolean hasStoreUncompressedSizeMB() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getStoreUncompressedSizeMB() { + return storeUncompressedSizeMB_; + } + + // optional uint32 storefileSizeMB = 5; + public static final int STOREFILESIZEMB_FIELD_NUMBER = 5; + private int storefileSizeMB_; + public boolean hasStorefileSizeMB() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public int getStorefileSizeMB() { + return storefileSizeMB_; + } + + // optional uint32 memstoreSizeMB = 6; + public static final int MEMSTORESIZEMB_FIELD_NUMBER = 6; + private int memstoreSizeMB_; + public boolean hasMemstoreSizeMB() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public int getMemstoreSizeMB() { + return memstoreSizeMB_; + } + + // optional uint32 storefileIndexSizeMB = 7; + public static final 
int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 7; + private int storefileIndexSizeMB_; + public boolean hasStorefileIndexSizeMB() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + public int getStorefileIndexSizeMB() { + return storefileIndexSizeMB_; + } + + // optional uint64 readRequestsCount = 8; + public static final int READREQUESTSCOUNT_FIELD_NUMBER = 8; + private long readRequestsCount_; + public boolean hasReadRequestsCount() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + public long getReadRequestsCount() { + return readRequestsCount_; + } + + // optional uint64 writeRequestsCount = 9; + public static final int WRITEREQUESTSCOUNT_FIELD_NUMBER = 9; + private long writeRequestsCount_; + public boolean hasWriteRequestsCount() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + public long getWriteRequestsCount() { + return writeRequestsCount_; + } + + // optional uint64 totalCompactingKVs = 10; + public static final int TOTALCOMPACTINGKVS_FIELD_NUMBER = 10; + private long totalCompactingKVs_; + public boolean hasTotalCompactingKVs() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + public long getTotalCompactingKVs() { + return totalCompactingKVs_; + } + + // optional uint64 currentCompactedKVs = 11; + public static final int CURRENTCOMPACTEDKVS_FIELD_NUMBER = 11; + private long currentCompactedKVs_; + public boolean hasCurrentCompactedKVs() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + public long getCurrentCompactedKVs() { + return currentCompactedKVs_; + } + + // optional uint32 rootIndexSizeKB = 12; + public static final int ROOTINDEXSIZEKB_FIELD_NUMBER = 12; + private int rootIndexSizeKB_; + public boolean hasRootIndexSizeKB() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + public int getRootIndexSizeKB() { + return rootIndexSizeKB_; + } + + // optional uint32 totalStaticIndexSizeKB = 13; + public static final int TOTALSTATICINDEXSIZEKB_FIELD_NUMBER = 13; + private int totalStaticIndexSizeKB_; + public boolean hasTotalStaticIndexSizeKB() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + public int getTotalStaticIndexSizeKB() { + return totalStaticIndexSizeKB_; + } + + // optional uint32 totalStaticBloomSizeKB = 14; + public static final int TOTALSTATICBLOOMSIZEKB_FIELD_NUMBER = 14; + private int totalStaticBloomSizeKB_; + public boolean hasTotalStaticBloomSizeKB() { + return ((bitField0_ & 0x00002000) == 0x00002000); + } + public int getTotalStaticBloomSizeKB() { + return totalStaticBloomSizeKB_; + } + + // repeated .Coprocessor coprocessors = 15; + public static final int COPROCESSORS_FIELD_NUMBER = 15; + private java.util.List coprocessors_; + public java.util.List getCoprocessorsList() { + return coprocessors_; + } + public java.util.List + getCoprocessorsOrBuilderList() { + return coprocessors_; + } + public int getCoprocessorsCount() { + return coprocessors_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { + return coprocessors_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index) { + return coprocessors_.get(index); + } + + private void initFields() { + regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + stores_ = 0; + storefiles_ = 0; + storeUncompressedSizeMB_ = 0; + storefileSizeMB_ = 0; + memstoreSizeMB_ = 0; + storefileIndexSizeMB_ = 0; + readRequestsCount_ = 0L; + writeRequestsCount_ = 
0L; + totalCompactingKVs_ = 0L; + currentCompactedKVs_ = 0L; + rootIndexSizeKB_ = 0; + totalStaticIndexSizeKB_ = 0; + totalStaticBloomSizeKB_ = 0; + coprocessors_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegionSpecifier()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionSpecifier().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getCoprocessorsCount(); i++) { + if (!getCoprocessors(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, regionSpecifier_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, stores_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, storefiles_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt32(4, storeUncompressedSizeMB_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt32(5, storefileSizeMB_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeUInt32(6, memstoreSizeMB_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt32(7, storefileIndexSizeMB_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeUInt64(8, readRequestsCount_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeUInt64(9, writeRequestsCount_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeUInt64(10, totalCompactingKVs_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeUInt64(11, currentCompactedKVs_); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + output.writeUInt32(12, rootIndexSizeKB_); + } + if (((bitField0_ & 0x00001000) == 0x00001000)) { + output.writeUInt32(13, totalStaticIndexSizeKB_); + } + if (((bitField0_ & 0x00002000) == 0x00002000)) { + output.writeUInt32(14, totalStaticBloomSizeKB_); + } + for (int i = 0; i < coprocessors_.size(); i++) { + output.writeMessage(15, coprocessors_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, regionSpecifier_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, stores_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, storefiles_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(4, storeUncompressedSizeMB_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(5, storefileSizeMB_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(6, memstoreSizeMB_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += 
com.google.protobuf.CodedOutputStream + .computeUInt32Size(7, storefileIndexSizeMB_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, readRequestsCount_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(9, writeRequestsCount_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(10, totalCompactingKVs_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(11, currentCompactedKVs_); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(12, rootIndexSizeKB_); + } + if (((bitField0_ & 0x00001000) == 0x00001000)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(13, totalStaticIndexSizeKB_); + } + if (((bitField0_ & 0x00002000) == 0x00002000)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(14, totalStaticBloomSizeKB_); + } + for (int i = 0; i < coprocessors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(15, coprocessors_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad) obj; + + boolean result = true; + result = result && (hasRegionSpecifier() == other.hasRegionSpecifier()); + if (hasRegionSpecifier()) { + result = result && getRegionSpecifier() + .equals(other.getRegionSpecifier()); + } + result = result && (hasStores() == other.hasStores()); + if (hasStores()) { + result = result && (getStores() + == other.getStores()); + } + result = result && (hasStorefiles() == other.hasStorefiles()); + if (hasStorefiles()) { + result = result && (getStorefiles() + == other.getStorefiles()); + } + result = result && (hasStoreUncompressedSizeMB() == other.hasStoreUncompressedSizeMB()); + if (hasStoreUncompressedSizeMB()) { + result = result && (getStoreUncompressedSizeMB() + == other.getStoreUncompressedSizeMB()); + } + result = result && (hasStorefileSizeMB() == other.hasStorefileSizeMB()); + if (hasStorefileSizeMB()) { + result = result && (getStorefileSizeMB() + == other.getStorefileSizeMB()); + } + result = result && (hasMemstoreSizeMB() == other.hasMemstoreSizeMB()); + if (hasMemstoreSizeMB()) { + result = result && (getMemstoreSizeMB() + == other.getMemstoreSizeMB()); + } + result = result && (hasStorefileIndexSizeMB() == other.hasStorefileIndexSizeMB()); + if (hasStorefileIndexSizeMB()) { + result = result && (getStorefileIndexSizeMB() + == other.getStorefileIndexSizeMB()); + } + result = result && (hasReadRequestsCount() == other.hasReadRequestsCount()); + if (hasReadRequestsCount()) { + result = result && (getReadRequestsCount() + == other.getReadRequestsCount()); + } + result = result && (hasWriteRequestsCount() == 
other.hasWriteRequestsCount()); + if (hasWriteRequestsCount()) { + result = result && (getWriteRequestsCount() + == other.getWriteRequestsCount()); + } + result = result && (hasTotalCompactingKVs() == other.hasTotalCompactingKVs()); + if (hasTotalCompactingKVs()) { + result = result && (getTotalCompactingKVs() + == other.getTotalCompactingKVs()); + } + result = result && (hasCurrentCompactedKVs() == other.hasCurrentCompactedKVs()); + if (hasCurrentCompactedKVs()) { + result = result && (getCurrentCompactedKVs() + == other.getCurrentCompactedKVs()); + } + result = result && (hasRootIndexSizeKB() == other.hasRootIndexSizeKB()); + if (hasRootIndexSizeKB()) { + result = result && (getRootIndexSizeKB() + == other.getRootIndexSizeKB()); + } + result = result && (hasTotalStaticIndexSizeKB() == other.hasTotalStaticIndexSizeKB()); + if (hasTotalStaticIndexSizeKB()) { + result = result && (getTotalStaticIndexSizeKB() + == other.getTotalStaticIndexSizeKB()); + } + result = result && (hasTotalStaticBloomSizeKB() == other.hasTotalStaticBloomSizeKB()); + if (hasTotalStaticBloomSizeKB()) { + result = result && (getTotalStaticBloomSizeKB() + == other.getTotalStaticBloomSizeKB()); + } + result = result && getCoprocessorsList() + .equals(other.getCoprocessorsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegionSpecifier()) { + hash = (37 * hash) + REGIONSPECIFIER_FIELD_NUMBER; + hash = (53 * hash) + getRegionSpecifier().hashCode(); + } + if (hasStores()) { + hash = (37 * hash) + STORES_FIELD_NUMBER; + hash = (53 * hash) + getStores(); + } + if (hasStorefiles()) { + hash = (37 * hash) + STOREFILES_FIELD_NUMBER; + hash = (53 * hash) + getStorefiles(); + } + if (hasStoreUncompressedSizeMB()) { + hash = (37 * hash) + STOREUNCOMPRESSEDSIZEMB_FIELD_NUMBER; + hash = (53 * hash) + getStoreUncompressedSizeMB(); + } + if (hasStorefileSizeMB()) { + hash = (37 * hash) + STOREFILESIZEMB_FIELD_NUMBER; + hash = (53 * hash) + getStorefileSizeMB(); + } + if (hasMemstoreSizeMB()) { + hash = (37 * hash) + MEMSTORESIZEMB_FIELD_NUMBER; + hash = (53 * hash) + getMemstoreSizeMB(); + } + if (hasStorefileIndexSizeMB()) { + hash = (37 * hash) + STOREFILEINDEXSIZEMB_FIELD_NUMBER; + hash = (53 * hash) + getStorefileIndexSizeMB(); + } + if (hasReadRequestsCount()) { + hash = (37 * hash) + READREQUESTSCOUNT_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getReadRequestsCount()); + } + if (hasWriteRequestsCount()) { + hash = (37 * hash) + WRITEREQUESTSCOUNT_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getWriteRequestsCount()); + } + if (hasTotalCompactingKVs()) { + hash = (37 * hash) + TOTALCOMPACTINGKVS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTotalCompactingKVs()); + } + if (hasCurrentCompactedKVs()) { + hash = (37 * hash) + CURRENTCOMPACTEDKVS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCurrentCompactedKVs()); + } + if (hasRootIndexSizeKB()) { + hash = (37 * hash) + ROOTINDEXSIZEKB_FIELD_NUMBER; + hash = (53 * hash) + getRootIndexSizeKB(); + } + if (hasTotalStaticIndexSizeKB()) { + hash = (37 * hash) + TOTALSTATICINDEXSIZEKB_FIELD_NUMBER; + hash = (53 * hash) + getTotalStaticIndexSizeKB(); + } + if (hasTotalStaticBloomSizeKB()) { + hash = (37 * hash) + TOTALSTATICBLOOMSIZEKB_FIELD_NUMBER; + hash = (53 * hash) + getTotalStaticBloomSizeKB(); + } + if (getCoprocessorsCount() > 0) { + hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER; + 
hash = (53 * hash) + getCoprocessorsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final 
class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoadOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionLoad_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionLoad_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionSpecifierFieldBuilder(); + getCoprocessorsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionSpecifierBuilder_ == null) { + regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } else { + regionSpecifierBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + stores_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + storefiles_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + storeUncompressedSizeMB_ = 0; + bitField0_ = (bitField0_ & ~0x00000008); + storefileSizeMB_ = 0; + bitField0_ = (bitField0_ & ~0x00000010); + memstoreSizeMB_ = 0; + bitField0_ = (bitField0_ & ~0x00000020); + storefileIndexSizeMB_ = 0; + bitField0_ = (bitField0_ & ~0x00000040); + readRequestsCount_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + writeRequestsCount_ = 0L; + bitField0_ = (bitField0_ & ~0x00000100); + totalCompactingKVs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000200); + currentCompactedKVs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000400); + rootIndexSizeKB_ = 0; + bitField0_ = (bitField0_ & ~0x00000800); + totalStaticIndexSizeKB_ = 0; + bitField0_ = (bitField0_ & ~0x00001000); + totalStaticBloomSizeKB_ = 0; + bitField0_ = (bitField0_ & ~0x00002000); + if (coprocessorsBuilder_ == null) { + coprocessors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00004000); + } else { + coprocessorsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionSpecifierBuilder_ == null) { + result.regionSpecifier_ = regionSpecifier_; + } else { + result.regionSpecifier_ = regionSpecifierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.stores_ = stores_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.storefiles_ = storefiles_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.storeUncompressedSizeMB_ = storeUncompressedSizeMB_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.storefileSizeMB_ = storefileSizeMB_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.memstoreSizeMB_ = memstoreSizeMB_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.storefileIndexSizeMB_ = storefileIndexSizeMB_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + result.readRequestsCount_ = readRequestsCount_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000100; + } + result.writeRequestsCount_ = writeRequestsCount_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000200; + } + result.totalCompactingKVs_ = totalCompactingKVs_; + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000400; + } + result.currentCompactedKVs_ = currentCompactedKVs_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000800; + } + result.rootIndexSizeKB_ = rootIndexSizeKB_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00001000; + } + result.totalStaticIndexSizeKB_ = totalStaticIndexSizeKB_; + if (((from_bitField0_ & 0x00002000) == 0x00002000)) { + to_bitField0_ |= 0x00002000; + } + result.totalStaticBloomSizeKB_ = totalStaticBloomSizeKB_; + if (coprocessorsBuilder_ == null) { + if (((bitField0_ & 0x00004000) == 0x00004000)) { + coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_); + bitField0_ = (bitField0_ & ~0x00004000); + } + result.coprocessors_ = coprocessors_; + } else { + result.coprocessors_ = coprocessorsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.getDefaultInstance()) return this; + if (other.hasRegionSpecifier()) { + mergeRegionSpecifier(other.getRegionSpecifier()); + } + if (other.hasStores()) { + 
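// mergeFrom(other) copies only the fields present on `other`, so it acts
// as an overlay: scalars set on `other` win, unset scalars keep this
// builder's values, and the repeated coprocessors list is appended to
// (further down). A minimal overlay sketch, assuming two prebuilt
// RegionLoad messages `base` and `update` (hypothetical):
//
//   RegionLoad merged = base.toBuilder()    // toBuilder() defined above
//       .mergeFrom(update)
//       .build();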
setStores(other.getStores()); + } + if (other.hasStorefiles()) { + setStorefiles(other.getStorefiles()); + } + if (other.hasStoreUncompressedSizeMB()) { + setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB()); + } + if (other.hasStorefileSizeMB()) { + setStorefileSizeMB(other.getStorefileSizeMB()); + } + if (other.hasMemstoreSizeMB()) { + setMemstoreSizeMB(other.getMemstoreSizeMB()); + } + if (other.hasStorefileIndexSizeMB()) { + setStorefileIndexSizeMB(other.getStorefileIndexSizeMB()); + } + if (other.hasReadRequestsCount()) { + setReadRequestsCount(other.getReadRequestsCount()); + } + if (other.hasWriteRequestsCount()) { + setWriteRequestsCount(other.getWriteRequestsCount()); + } + if (other.hasTotalCompactingKVs()) { + setTotalCompactingKVs(other.getTotalCompactingKVs()); + } + if (other.hasCurrentCompactedKVs()) { + setCurrentCompactedKVs(other.getCurrentCompactedKVs()); + } + if (other.hasRootIndexSizeKB()) { + setRootIndexSizeKB(other.getRootIndexSizeKB()); + } + if (other.hasTotalStaticIndexSizeKB()) { + setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB()); + } + if (other.hasTotalStaticBloomSizeKB()) { + setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB()); + } + if (coprocessorsBuilder_ == null) { + if (!other.coprocessors_.isEmpty()) { + if (coprocessors_.isEmpty()) { + coprocessors_ = other.coprocessors_; + bitField0_ = (bitField0_ & ~0x00004000); + } else { + ensureCoprocessorsIsMutable(); + coprocessors_.addAll(other.coprocessors_); + } + onChanged(); + } + } else { + if (!other.coprocessors_.isEmpty()) { + if (coprocessorsBuilder_.isEmpty()) { + coprocessorsBuilder_.dispose(); + coprocessorsBuilder_ = null; + coprocessors_ = other.coprocessors_; + bitField0_ = (bitField0_ & ~0x00004000); + coprocessorsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getCoprocessorsFieldBuilder() : null; + } else { + coprocessorsBuilder_.addAllMessages(other.coprocessors_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegionSpecifier()) { + + return false; + } + if (!getRegionSpecifier().isInitialized()) { + + return false; + } + for (int i = 0; i < getCoprocessorsCount(); i++) { + if (!getCoprocessors(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(); + if (hasRegionSpecifier()) { + subBuilder.mergeFrom(getRegionSpecifier()); + } + input.readMessage(subBuilder, extensionRegistry); + setRegionSpecifier(subBuilder.buildPartial()); + break; + } + case 16: { + bitField0_ |= 0x00000002; + stores_ = input.readUInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + storefiles_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + storeUncompressedSizeMB_ = input.readUInt32(); + break; + } + case 40: { + bitField0_ |= 0x00000010; + storefileSizeMB_ = input.readUInt32(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + memstoreSizeMB_ = input.readUInt32(); + break; + } + case 56: { + bitField0_ |= 0x00000040; + storefileIndexSizeMB_ = input.readUInt32(); + break; + } + case 64: { + bitField0_ |= 0x00000080; + readRequestsCount_ = input.readUInt64(); + break; + } + case 72: { + bitField0_ |= 0x00000100; + writeRequestsCount_ = input.readUInt64(); + break; + } + case 80: { + bitField0_ |= 0x00000200; + totalCompactingKVs_ = input.readUInt64(); + break; + } + case 88: { + bitField0_ |= 0x00000400; + currentCompactedKVs_ = input.readUInt64(); + break; + } + case 96: { + bitField0_ |= 0x00000800; + rootIndexSizeKB_ = input.readUInt32(); + break; + } + case 104: { + bitField0_ |= 0x00001000; + totalStaticIndexSizeKB_ = input.readUInt32(); + break; + } + case 112: { + bitField0_ |= 0x00002000; + totalStaticBloomSizeKB_ = input.readUInt32(); + break; + } + case 122: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addCoprocessors(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .RegionSpecifier regionSpecifier = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, 
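// In the tag switch above, each case value encodes
// (fieldNumber << 3) | wireType from the protobuf wire format, e.g.:
//
//   case 10:  (1 << 3) | 2  -> regionSpecifier, length-delimited
//   case 16:  (2 << 3) | 0  -> stores, varint
//   case 64:  (8 << 3) | 0  -> readRequestsCount, varint
//   case 122: (15 << 3) | 2 -> coprocessors, length-delimited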
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionSpecifierBuilder_; + public boolean hasRegionSpecifier() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier() { + if (regionSpecifierBuilder_ == null) { + return regionSpecifier_; + } else { + return regionSpecifierBuilder_.getMessage(); + } + } + public Builder setRegionSpecifier(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionSpecifierBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionSpecifier_ = value; + onChanged(); + } else { + regionSpecifierBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegionSpecifier( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionSpecifierBuilder_ == null) { + regionSpecifier_ = builderForValue.build(); + onChanged(); + } else { + regionSpecifierBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegionSpecifier(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionSpecifierBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + regionSpecifier_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + regionSpecifier_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionSpecifier_).mergeFrom(value).buildPartial(); + } else { + regionSpecifier_ = value; + } + onChanged(); + } else { + regionSpecifierBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearRegionSpecifier() { + if (regionSpecifierBuilder_ == null) { + regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + onChanged(); + } else { + regionSpecifierBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionSpecifierBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionSpecifierFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder() { + if (regionSpecifierBuilder_ != null) { + return regionSpecifierBuilder_.getMessageOrBuilder(); + } else { + return regionSpecifier_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionSpecifierFieldBuilder() { + if (regionSpecifierBuilder_ == null) { + regionSpecifierBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + regionSpecifier_, + getParentForChildren(), + isClean()); + regionSpecifier_ = null; + } + return regionSpecifierBuilder_; + } + + // optional uint32 stores = 2; + private 
int stores_ ; + public boolean hasStores() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getStores() { + return stores_; + } + public Builder setStores(int value) { + bitField0_ |= 0x00000002; + stores_ = value; + onChanged(); + return this; + } + public Builder clearStores() { + bitField0_ = (bitField0_ & ~0x00000002); + stores_ = 0; + onChanged(); + return this; + } + + // optional uint32 storefiles = 3; + private int storefiles_ ; + public boolean hasStorefiles() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getStorefiles() { + return storefiles_; + } + public Builder setStorefiles(int value) { + bitField0_ |= 0x00000004; + storefiles_ = value; + onChanged(); + return this; + } + public Builder clearStorefiles() { + bitField0_ = (bitField0_ & ~0x00000004); + storefiles_ = 0; + onChanged(); + return this; + } + + // optional uint32 storeUncompressedSizeMB = 4; + private int storeUncompressedSizeMB_ ; + public boolean hasStoreUncompressedSizeMB() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getStoreUncompressedSizeMB() { + return storeUncompressedSizeMB_; + } + public Builder setStoreUncompressedSizeMB(int value) { + bitField0_ |= 0x00000008; + storeUncompressedSizeMB_ = value; + onChanged(); + return this; + } + public Builder clearStoreUncompressedSizeMB() { + bitField0_ = (bitField0_ & ~0x00000008); + storeUncompressedSizeMB_ = 0; + onChanged(); + return this; + } + + // optional uint32 storefileSizeMB = 5; + private int storefileSizeMB_ ; + public boolean hasStorefileSizeMB() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public int getStorefileSizeMB() { + return storefileSizeMB_; + } + public Builder setStorefileSizeMB(int value) { + bitField0_ |= 0x00000010; + storefileSizeMB_ = value; + onChanged(); + return this; + } + public Builder clearStorefileSizeMB() { + bitField0_ = (bitField0_ & ~0x00000010); + storefileSizeMB_ = 0; + onChanged(); + return this; + } + + // optional uint32 memstoreSizeMB = 6; + private int memstoreSizeMB_ ; + public boolean hasMemstoreSizeMB() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public int getMemstoreSizeMB() { + return memstoreSizeMB_; + } + public Builder setMemstoreSizeMB(int value) { + bitField0_ |= 0x00000020; + memstoreSizeMB_ = value; + onChanged(); + return this; + } + public Builder clearMemstoreSizeMB() { + bitField0_ = (bitField0_ & ~0x00000020); + memstoreSizeMB_ = 0; + onChanged(); + return this; + } + + // optional uint32 storefileIndexSizeMB = 7; + private int storefileIndexSizeMB_ ; + public boolean hasStorefileIndexSizeMB() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + public int getStorefileIndexSizeMB() { + return storefileIndexSizeMB_; + } + public Builder setStorefileIndexSizeMB(int value) { + bitField0_ |= 0x00000040; + storefileIndexSizeMB_ = value; + onChanged(); + return this; + } + public Builder clearStorefileIndexSizeMB() { + bitField0_ = (bitField0_ & ~0x00000040); + storefileIndexSizeMB_ = 0; + onChanged(); + return this; + } + + // optional uint64 readRequestsCount = 8; + private long readRequestsCount_ ; + public boolean hasReadRequestsCount() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + public long getReadRequestsCount() { + return readRequestsCount_; + } + public Builder setReadRequestsCount(long value) { + bitField0_ |= 0x00000080; + readRequestsCount_ = value; + onChanged(); + return this; + } + public Builder clearReadRequestsCount() { + bitField0_ = (bitField0_ & ~0x00000080); + 
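// Each optional scalar accessor in this Builder follows the same
// presence-bit pattern: setX() stores the value and flips its bit in
// bitField0_, while clearX() drops the bit and restores the proto
// default. An illustrative round trip using the accessors shown here:
//
//   RegionLoad.Builder rb = RegionLoad.newBuilder();
//   rb.setMemstoreSizeMB(128);
//   assert rb.hasMemstoreSizeMB() && rb.getMemstoreSizeMB() == 128;
//   rb.clearMemstoreSizeMB();
//   assert !rb.hasMemstoreSizeMB() && rb.getMemstoreSizeMB() == 0;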
readRequestsCount_ = 0L; + onChanged(); + return this; + } + + // optional uint64 writeRequestsCount = 9; + private long writeRequestsCount_ ; + public boolean hasWriteRequestsCount() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + public long getWriteRequestsCount() { + return writeRequestsCount_; + } + public Builder setWriteRequestsCount(long value) { + bitField0_ |= 0x00000100; + writeRequestsCount_ = value; + onChanged(); + return this; + } + public Builder clearWriteRequestsCount() { + bitField0_ = (bitField0_ & ~0x00000100); + writeRequestsCount_ = 0L; + onChanged(); + return this; + } + + // optional uint64 totalCompactingKVs = 10; + private long totalCompactingKVs_ ; + public boolean hasTotalCompactingKVs() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + public long getTotalCompactingKVs() { + return totalCompactingKVs_; + } + public Builder setTotalCompactingKVs(long value) { + bitField0_ |= 0x00000200; + totalCompactingKVs_ = value; + onChanged(); + return this; + } + public Builder clearTotalCompactingKVs() { + bitField0_ = (bitField0_ & ~0x00000200); + totalCompactingKVs_ = 0L; + onChanged(); + return this; + } + + // optional uint64 currentCompactedKVs = 11; + private long currentCompactedKVs_ ; + public boolean hasCurrentCompactedKVs() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + public long getCurrentCompactedKVs() { + return currentCompactedKVs_; + } + public Builder setCurrentCompactedKVs(long value) { + bitField0_ |= 0x00000400; + currentCompactedKVs_ = value; + onChanged(); + return this; + } + public Builder clearCurrentCompactedKVs() { + bitField0_ = (bitField0_ & ~0x00000400); + currentCompactedKVs_ = 0L; + onChanged(); + return this; + } + + // optional uint32 rootIndexSizeKB = 12; + private int rootIndexSizeKB_ ; + public boolean hasRootIndexSizeKB() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + public int getRootIndexSizeKB() { + return rootIndexSizeKB_; + } + public Builder setRootIndexSizeKB(int value) { + bitField0_ |= 0x00000800; + rootIndexSizeKB_ = value; + onChanged(); + return this; + } + public Builder clearRootIndexSizeKB() { + bitField0_ = (bitField0_ & ~0x00000800); + rootIndexSizeKB_ = 0; + onChanged(); + return this; + } + + // optional uint32 totalStaticIndexSizeKB = 13; + private int totalStaticIndexSizeKB_ ; + public boolean hasTotalStaticIndexSizeKB() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + public int getTotalStaticIndexSizeKB() { + return totalStaticIndexSizeKB_; + } + public Builder setTotalStaticIndexSizeKB(int value) { + bitField0_ |= 0x00001000; + totalStaticIndexSizeKB_ = value; + onChanged(); + return this; + } + public Builder clearTotalStaticIndexSizeKB() { + bitField0_ = (bitField0_ & ~0x00001000); + totalStaticIndexSizeKB_ = 0; + onChanged(); + return this; + } + + // optional uint32 totalStaticBloomSizeKB = 14; + private int totalStaticBloomSizeKB_ ; + public boolean hasTotalStaticBloomSizeKB() { + return ((bitField0_ & 0x00002000) == 0x00002000); + } + public int getTotalStaticBloomSizeKB() { + return totalStaticBloomSizeKB_; + } + public Builder setTotalStaticBloomSizeKB(int value) { + bitField0_ |= 0x00002000; + totalStaticBloomSizeKB_ = value; + onChanged(); + return this; + } + public Builder clearTotalStaticBloomSizeKB() { + bitField0_ = (bitField0_ & ~0x00002000); + totalStaticBloomSizeKB_ = 0; + onChanged(); + return this; + } + + // repeated .Coprocessor coprocessors = 15; + private java.util.List coprocessors_ = + java.util.Collections.emptyList(); + 
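// Putting the Builder together: isInitialized() makes regionSpecifier the
// only required field, so calling build() without it throws an
// UninitializedMessageException. A sketch, again assuming a prebuilt
// RegionSpecifier `spec` (hypothetical):
//
//   RegionLoad load = RegionLoad.newBuilder()
//       .setRegionSpecifier(spec)
//       .setStores(3)
//       .setStorefiles(12)
//       .setReadRequestsCount(1024L)
//       .build();
//   byte[] wire = load.toByteArray();             // from GeneratedMessage
//   RegionLoad copy = RegionLoad.parseFrom(wire); // parseFrom(byte[]) above
//   assert load.equals(copy);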
private void ensureCoprocessorsIsMutable() { + if (!((bitField0_ & 0x00004000) == 0x00004000)) { + coprocessors_ = new java.util.ArrayList(coprocessors_); + bitField0_ |= 0x00004000; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> coprocessorsBuilder_; + + public java.util.List getCoprocessorsList() { + if (coprocessorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(coprocessors_); + } else { + return coprocessorsBuilder_.getMessageList(); + } + } + public int getCoprocessorsCount() { + if (coprocessorsBuilder_ == null) { + return coprocessors_.size(); + } else { + return coprocessorsBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { + if (coprocessorsBuilder_ == null) { + return coprocessors_.get(index); + } else { + return coprocessorsBuilder_.getMessage(index); + } + } + public Builder setCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.set(index, value); + onChanged(); + } else { + coprocessorsBuilder_.setMessage(index, value); + } + return this; + } + public Builder setCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.set(index, builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addCoprocessors(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.add(value); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(value); + } + return this; + } + public Builder addCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.add(index, value); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(index, value); + } + return this; + } + public Builder addCoprocessors( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.add(builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.add(index, builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllCoprocessors( + java.lang.Iterable values) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + super.addAll(values, 
coprocessors_); + onChanged(); + } else { + coprocessorsBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearCoprocessors() { + if (coprocessorsBuilder_ == null) { + coprocessors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00004000); + onChanged(); + } else { + coprocessorsBuilder_.clear(); + } + return this; + } + public Builder removeCoprocessors(int index) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.remove(index); + onChanged(); + } else { + coprocessorsBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder getCoprocessorsBuilder( + int index) { + return getCoprocessorsFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index) { + if (coprocessorsBuilder_ == null) { + return coprocessors_.get(index); } else { + return coprocessorsBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getCoprocessorsOrBuilderList() { + if (coprocessorsBuilder_ != null) { + return coprocessorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(coprocessors_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder() { + return getCoprocessorsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder( + int index) { + return getCoprocessorsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()); + } + public java.util.List + getCoprocessorsBuilderList() { + return getCoprocessorsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> + getCoprocessorsFieldBuilder() { + if (coprocessorsBuilder_ == null) { + coprocessorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>( + coprocessors_, + ((bitField0_ & 0x00004000) == 0x00004000), + getParentForChildren(), + isClean()); + coprocessors_ = null; + } + return coprocessorsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegionLoad) + } + + static { + defaultInstance = new RegionLoad(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionLoad) + } + + public interface ServerLoadOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint32 requestsPerSecond = 1; + boolean hasRequestsPerSecond(); + int getRequestsPerSecond(); + + // optional uint32 totalNumberOfRequests = 2; + boolean hasTotalNumberOfRequests(); + int getTotalNumberOfRequests(); + + // optional uint32 usedHeapMB = 3; + boolean hasUsedHeapMB(); + int getUsedHeapMB(); + + // optional uint32 maxHeapMB = 4; + boolean hasMaxHeapMB(); + int getMaxHeapMB(); + + // repeated .RegionLoad regionLoads = 5; + java.util.List + 
getRegionLoadsList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad getRegionLoads(int index); + int getRegionLoadsCount(); + java.util.List + getRegionLoadsOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index); + + // repeated .Coprocessor coprocessors = 6; + java.util.List + getCoprocessorsList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index); + int getCoprocessorsCount(); + java.util.List + getCoprocessorsOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index); + } + public static final class ServerLoad extends + com.google.protobuf.GeneratedMessage + implements ServerLoadOrBuilder { + // Use ServerLoad.newBuilder() to construct. + private ServerLoad(Builder builder) { + super(builder); + } + private ServerLoad(boolean noInit) {} + + private static final ServerLoad defaultInstance; + public static ServerLoad getDefaultInstance() { + return defaultInstance; + } + + public ServerLoad getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerLoad_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerLoad_fieldAccessorTable; + } + + private int bitField0_; + // optional uint32 requestsPerSecond = 1; + public static final int REQUESTSPERSECOND_FIELD_NUMBER = 1; + private int requestsPerSecond_; + public boolean hasRequestsPerSecond() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getRequestsPerSecond() { + return requestsPerSecond_; + } + + // optional uint32 totalNumberOfRequests = 2; + public static final int TOTALNUMBEROFREQUESTS_FIELD_NUMBER = 2; + private int totalNumberOfRequests_; + public boolean hasTotalNumberOfRequests() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getTotalNumberOfRequests() { + return totalNumberOfRequests_; + } + + // optional uint32 usedHeapMB = 3; + public static final int USEDHEAPMB_FIELD_NUMBER = 3; + private int usedHeapMB_; + public boolean hasUsedHeapMB() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getUsedHeapMB() { + return usedHeapMB_; + } + + // optional uint32 maxHeapMB = 4; + public static final int MAXHEAPMB_FIELD_NUMBER = 4; + private int maxHeapMB_; + public boolean hasMaxHeapMB() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getMaxHeapMB() { + return maxHeapMB_; + } + + // repeated .RegionLoad regionLoads = 5; + public static final int REGIONLOADS_FIELD_NUMBER = 5; + private java.util.List regionLoads_; + public java.util.List getRegionLoadsList() { + return regionLoads_; + } + public java.util.List + getRegionLoadsOrBuilderList() { + return regionLoads_; + } + public int getRegionLoadsCount() { + return regionLoads_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad getRegionLoads(int index) { + return regionLoads_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index) { + return regionLoads_.get(index); + } + + // repeated .Coprocessor coprocessors = 6; + public static final int 
COPROCESSORS_FIELD_NUMBER = 6; + private java.util.List coprocessors_; + public java.util.List getCoprocessorsList() { + return coprocessors_; + } + public java.util.List + getCoprocessorsOrBuilderList() { + return coprocessors_; + } + public int getCoprocessorsCount() { + return coprocessors_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { + return coprocessors_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index) { + return coprocessors_.get(index); + } + + private void initFields() { + requestsPerSecond_ = 0; + totalNumberOfRequests_ = 0; + usedHeapMB_ = 0; + maxHeapMB_ = 0; + regionLoads_ = java.util.Collections.emptyList(); + coprocessors_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getRegionLoadsCount(); i++) { + if (!getRegionLoads(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getCoprocessorsCount(); i++) { + if (!getCoprocessors(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, requestsPerSecond_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, totalNumberOfRequests_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, usedHeapMB_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt32(4, maxHeapMB_); + } + for (int i = 0; i < regionLoads_.size(); i++) { + output.writeMessage(5, regionLoads_.get(i)); + } + for (int i = 0; i < coprocessors_.size(); i++) { + output.writeMessage(6, coprocessors_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, requestsPerSecond_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, totalNumberOfRequests_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, usedHeapMB_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(4, maxHeapMB_); + } + for (int i = 0; i < regionLoads_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, regionLoads_.get(i)); + } + for (int i = 0; i < coprocessors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, coprocessors_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object 
obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad) obj; + + boolean result = true; + result = result && (hasRequestsPerSecond() == other.hasRequestsPerSecond()); + if (hasRequestsPerSecond()) { + result = result && (getRequestsPerSecond() + == other.getRequestsPerSecond()); + } + result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests()); + if (hasTotalNumberOfRequests()) { + result = result && (getTotalNumberOfRequests() + == other.getTotalNumberOfRequests()); + } + result = result && (hasUsedHeapMB() == other.hasUsedHeapMB()); + if (hasUsedHeapMB()) { + result = result && (getUsedHeapMB() + == other.getUsedHeapMB()); + } + result = result && (hasMaxHeapMB() == other.hasMaxHeapMB()); + if (hasMaxHeapMB()) { + result = result && (getMaxHeapMB() + == other.getMaxHeapMB()); + } + result = result && getRegionLoadsList() + .equals(other.getRegionLoadsList()); + result = result && getCoprocessorsList() + .equals(other.getCoprocessorsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRequestsPerSecond()) { + hash = (37 * hash) + REQUESTSPERSECOND_FIELD_NUMBER; + hash = (53 * hash) + getRequestsPerSecond(); + } + if (hasTotalNumberOfRequests()) { + hash = (37 * hash) + TOTALNUMBEROFREQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getTotalNumberOfRequests(); + } + if (hasUsedHeapMB()) { + hash = (37 * hash) + USEDHEAPMB_FIELD_NUMBER; + hash = (53 * hash) + getUsedHeapMB(); + } + if (hasMaxHeapMB()) { + hash = (37 * hash) + MAXHEAPMB_FIELD_NUMBER; + hash = (53 * hash) + getMaxHeapMB(); + } + if (getRegionLoadsCount() > 0) { + hash = (37 * hash) + REGIONLOADS_FIELD_NUMBER; + hash = (53 * hash) + getRegionLoadsList().hashCode(); + } + if (getCoprocessorsCount() > 0) { + hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER; + hash = (53 * hash) + getCoprocessorsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerLoad_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerLoad_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionLoadsFieldBuilder(); + getCoprocessorsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + requestsPerSecond_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + totalNumberOfRequests_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + usedHeapMB_ = 0; + bitField0_ = (bitField0_ & 
~0x00000004); + maxHeapMB_ = 0; + bitField0_ = (bitField0_ & ~0x00000008); + if (regionLoadsBuilder_ == null) { + regionLoads_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + regionLoadsBuilder_.clear(); + } + if (coprocessorsBuilder_ == null) { + coprocessors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + } else { + coprocessorsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.requestsPerSecond_ = requestsPerSecond_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.totalNumberOfRequests_ = totalNumberOfRequests_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.usedHeapMB_ = usedHeapMB_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.maxHeapMB_ = maxHeapMB_; + if (regionLoadsBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.regionLoads_ = regionLoads_; + } else { + result.regionLoads_ = regionLoadsBuilder_.build(); + } + if (coprocessorsBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { + coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.coprocessors_ = coprocessors_; + } else { + result.coprocessors_ = coprocessorsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance()) return this; + if (other.hasRequestsPerSecond()) { + setRequestsPerSecond(other.getRequestsPerSecond()); + } + if (other.hasTotalNumberOfRequests()) { + setTotalNumberOfRequests(other.getTotalNumberOfRequests()); + } + if (other.hasUsedHeapMB()) { + setUsedHeapMB(other.getUsedHeapMB()); + } + if (other.hasMaxHeapMB()) { + setMaxHeapMB(other.getMaxHeapMB()); + } + if (regionLoadsBuilder_ == null) { + if (!other.regionLoads_.isEmpty()) { + if (regionLoads_.isEmpty()) { + regionLoads_ = other.regionLoads_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureRegionLoadsIsMutable(); + regionLoads_.addAll(other.regionLoads_); + } + onChanged(); + } + } else { + if (!other.regionLoads_.isEmpty()) { + if (regionLoadsBuilder_.isEmpty()) { + regionLoadsBuilder_.dispose(); + regionLoadsBuilder_ = null; + regionLoads_ = other.regionLoads_; + bitField0_ = (bitField0_ & ~0x00000010); + regionLoadsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRegionLoadsFieldBuilder() : null; + } else { + regionLoadsBuilder_.addAllMessages(other.regionLoads_); + } + } + } + if (coprocessorsBuilder_ == null) { + if (!other.coprocessors_.isEmpty()) { + if (coprocessors_.isEmpty()) { + coprocessors_ = other.coprocessors_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureCoprocessorsIsMutable(); + coprocessors_.addAll(other.coprocessors_); + } + onChanged(); + } + } else { + if (!other.coprocessors_.isEmpty()) { + if (coprocessorsBuilder_.isEmpty()) { + coprocessorsBuilder_.dispose(); + coprocessorsBuilder_ = null; + coprocessors_ = other.coprocessors_; + bitField0_ = (bitField0_ & ~0x00000020); + coprocessorsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getCoprocessorsFieldBuilder() : null; + } else { + coprocessorsBuilder_.addAllMessages(other.coprocessors_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getRegionLoadsCount(); i++) { + if (!getRegionLoads(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getCoprocessorsCount(); i++) { + if (!getCoprocessors(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + requestsPerSecond_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + totalNumberOfRequests_ = input.readUInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + usedHeapMB_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + maxHeapMB_ = input.readUInt32(); + break; + } + case 42: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addRegionLoads(subBuilder.buildPartial()); + break; + } + case 50: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addCoprocessors(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // optional uint32 requestsPerSecond = 1; + private int requestsPerSecond_ ; + public boolean hasRequestsPerSecond() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getRequestsPerSecond() { + return requestsPerSecond_; + } + public Builder setRequestsPerSecond(int value) { + bitField0_ |= 0x00000001; + requestsPerSecond_ = value; + onChanged(); + return this; + } + public Builder clearRequestsPerSecond() { + bitField0_ = (bitField0_ & ~0x00000001); + requestsPerSecond_ = 0; + onChanged(); + return this; + } + + // optional uint32 totalNumberOfRequests = 2; + private int totalNumberOfRequests_ ; + public boolean hasTotalNumberOfRequests() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getTotalNumberOfRequests() { + return totalNumberOfRequests_; + } + public Builder setTotalNumberOfRequests(int value) { + bitField0_ |= 0x00000002; + totalNumberOfRequests_ = value; + onChanged(); + return this; + } + public Builder clearTotalNumberOfRequests() { + bitField0_ = (bitField0_ & ~0x00000002); + totalNumberOfRequests_ = 0; + onChanged(); + return this; + } + + // optional uint32 usedHeapMB = 3; + private int usedHeapMB_ ; + public boolean hasUsedHeapMB() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getUsedHeapMB() { + return usedHeapMB_; + } + public Builder setUsedHeapMB(int value) { + bitField0_ |= 0x00000004; 
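// The ServerLoad.Builder combines the scalar fields here with the
// repeated regionLoads/coprocessors fields that follow. A round-trip
// sketch, reusing a previously built, fully initialized RegionLoad
// `load` (hypothetical):
//
//   ServerLoad sl = ServerLoad.newBuilder()
//       .setRequestsPerSecond(42)
//       .setTotalNumberOfRequests(100000)
//       .setUsedHeapMB(512)
//       .setMaxHeapMB(1024)
//       .addRegionLoads(load)                 // repeated, defined below
//       .build();
//   ServerLoad parsed = ServerLoad.parseFrom(sl.toByteArray());
//   assert parsed.getRegionLoadsCount() == 1;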
+ usedHeapMB_ = value; + onChanged(); + return this; + } + public Builder clearUsedHeapMB() { + bitField0_ = (bitField0_ & ~0x00000004); + usedHeapMB_ = 0; + onChanged(); + return this; + } + + // optional uint32 maxHeapMB = 4; + private int maxHeapMB_ ; + public boolean hasMaxHeapMB() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public int getMaxHeapMB() { + return maxHeapMB_; + } + public Builder setMaxHeapMB(int value) { + bitField0_ |= 0x00000008; + maxHeapMB_ = value; + onChanged(); + return this; + } + public Builder clearMaxHeapMB() { + bitField0_ = (bitField0_ & ~0x00000008); + maxHeapMB_ = 0; + onChanged(); + return this; + } + + // repeated .RegionLoad regionLoads = 5; + private java.util.List regionLoads_ = + java.util.Collections.emptyList(); + private void ensureRegionLoadsIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + regionLoads_ = new java.util.ArrayList(regionLoads_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoadOrBuilder> regionLoadsBuilder_; + + public java.util.List getRegionLoadsList() { + if (regionLoadsBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionLoads_); + } else { + return regionLoadsBuilder_.getMessageList(); + } + } + public int getRegionLoadsCount() { + if (regionLoadsBuilder_ == null) { + return regionLoads_.size(); + } else { + return regionLoadsBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad getRegionLoads(int index) { + if (regionLoadsBuilder_ == null) { + return regionLoads_.get(index); + } else { + return regionLoadsBuilder_.getMessage(index); + } + } + public Builder setRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad value) { + if (regionLoadsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionLoadsIsMutable(); + regionLoads_.set(index, value); + onChanged(); + } else { + regionLoadsBuilder_.setMessage(index, value); + } + return this; + } + public Builder setRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder builderForValue) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.set(index, builderForValue.build()); + onChanged(); + } else { + regionLoadsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addRegionLoads(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad value) { + if (regionLoadsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionLoadsIsMutable(); + regionLoads_.add(value); + onChanged(); + } else { + regionLoadsBuilder_.addMessage(value); + } + return this; + } + public Builder addRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad value) { + if (regionLoadsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionLoadsIsMutable(); + regionLoads_.add(index, value); + onChanged(); + } else { + regionLoadsBuilder_.addMessage(index, value); + } + return this; + } + public Builder addRegionLoads( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder builderForValue) { + if (regionLoadsBuilder_ == null) { + 
ensureRegionLoadsIsMutable(); + regionLoads_.add(builderForValue.build()); + onChanged(); + } else { + regionLoadsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder builderForValue) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.add(index, builderForValue.build()); + onChanged(); + } else { + regionLoadsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllRegionLoads( + java.lang.Iterable values) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + super.addAll(values, regionLoads_); + onChanged(); + } else { + regionLoadsBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearRegionLoads() { + if (regionLoadsBuilder_ == null) { + regionLoads_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + regionLoadsBuilder_.clear(); + } + return this; + } + public Builder removeRegionLoads(int index) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.remove(index); + onChanged(); + } else { + regionLoadsBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder getRegionLoadsBuilder( + int index) { + return getRegionLoadsFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index) { + if (regionLoadsBuilder_ == null) { + return regionLoads_.get(index); } else { + return regionLoadsBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getRegionLoadsOrBuilderList() { + if (regionLoadsBuilder_ != null) { + return regionLoadsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionLoads_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder addRegionLoadsBuilder() { + return getRegionLoadsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder addRegionLoadsBuilder( + int index) { + return getRegionLoadsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.getDefaultInstance()); + } + public java.util.List + getRegionLoadsBuilderList() { + return getRegionLoadsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoadOrBuilder> + getRegionLoadsFieldBuilder() { + if (regionLoadsBuilder_ == null) { + regionLoadsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoadOrBuilder>( + regionLoads_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + regionLoads_ = null; + } + return regionLoadsBuilder_; + } + + // repeated .Coprocessor coprocessors = 6; + private java.util.List coprocessors_ = + 
java.util.Collections.emptyList(); + private void ensureCoprocessorsIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + coprocessors_ = new java.util.ArrayList(coprocessors_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> coprocessorsBuilder_; + + public java.util.List getCoprocessorsList() { + if (coprocessorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(coprocessors_); + } else { + return coprocessorsBuilder_.getMessageList(); + } + } + public int getCoprocessorsCount() { + if (coprocessorsBuilder_ == null) { + return coprocessors_.size(); + } else { + return coprocessorsBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { + if (coprocessorsBuilder_ == null) { + return coprocessors_.get(index); + } else { + return coprocessorsBuilder_.getMessage(index); + } + } + public Builder setCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.set(index, value); + onChanged(); + } else { + coprocessorsBuilder_.setMessage(index, value); + } + return this; + } + public Builder setCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.set(index, builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addCoprocessors(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.add(value); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(value); + } + return this; + } + public Builder addCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.add(index, value); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(index, value); + } + return this; + } + public Builder addCoprocessors( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.add(builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.add(index, builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllCoprocessors( + java.lang.Iterable values) { + if (coprocessorsBuilder_ == null) { + 
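+          // A usage sketch of this ServerLoad builder (values are
+          // illustrative; newBuilder() follows the standard generated
+          // pattern, and every setter shown is defined in this class):
+          //
+          //   HBaseProtos.ServerLoad sl = HBaseProtos.ServerLoad.newBuilder()
+          //       .setRequestsPerSecond(100)
+          //       .setTotalNumberOfRequests(150000)
+          //       .setUsedHeapMB(512)
+          //       .setMaxHeapMB(1024)
+          //       .addRegionLoads(regionLoad)  // a previously built RegionLoad
+          //       .addCoprocessors(HBaseProtos.Coprocessor.newBuilder()
+          //           .setName("org.example.DemoObserver").build())
+          //       .build();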
ensureCoprocessorsIsMutable(); + super.addAll(values, coprocessors_); + onChanged(); + } else { + coprocessorsBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearCoprocessors() { + if (coprocessorsBuilder_ == null) { + coprocessors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + coprocessorsBuilder_.clear(); + } + return this; + } + public Builder removeCoprocessors(int index) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.remove(index); + onChanged(); + } else { + coprocessorsBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder getCoprocessorsBuilder( + int index) { + return getCoprocessorsFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index) { + if (coprocessorsBuilder_ == null) { + return coprocessors_.get(index); } else { + return coprocessorsBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getCoprocessorsOrBuilderList() { + if (coprocessorsBuilder_ != null) { + return coprocessorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(coprocessors_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder() { + return getCoprocessorsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder( + int index) { + return getCoprocessorsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()); + } + public java.util.List + getCoprocessorsBuilderList() { + return getCoprocessorsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> + getCoprocessorsFieldBuilder() { + if (coprocessorsBuilder_ == null) { + coprocessorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>( + coprocessors_, + ((bitField0_ & 0x00000020) == 0x00000020), + getParentForChildren(), + isClean()); + coprocessors_ = null; + } + return coprocessorsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ServerLoad) + } + + static { + defaultInstance = new ServerLoad(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ServerLoad) + } + public interface TimeRangeOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -3070,6 +5843,422 @@ public final class HBaseProtos { // @@protoc_insertion_point(class_scope:ServerName) } + public interface CoprocessorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + boolean hasName(); + String getName(); + } + public static final class Coprocessor extends + com.google.protobuf.GeneratedMessage + implements CoprocessorOrBuilder { + // Use Coprocessor.newBuilder() to 
construct. + private Coprocessor(Builder builder) { + super(builder); + } + private Coprocessor(boolean noInit) {} + + private static final Coprocessor defaultInstance; + public static Coprocessor getDefaultInstance() { + return defaultInstance; + } + + public Coprocessor getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_fieldAccessorTable; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + name_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + name_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + 
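+      // Generated hashCode scheme: seed with 41, mix in the descriptor hash
+      // (x19), then for each present field mix its field number (x37) and
+      // value hash (x53), and finally the unknown fields (x29). Messages that
+      // compare equal under equals() above hash identically.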
hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + 
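+        // Coprocessor carries a single required name. A minimal construction
+        // sketch (the class name shown is illustrative):
+        //
+        //   HBaseProtos.Coprocessor cp = HBaseProtos.Coprocessor.newBuilder()
+        //       .setName("org.example.DemoObserver")
+        //       .build();
+        //
+        // build() throws an UninitializedMessageException if the required
+        // name is unset; see isInitialized() below.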
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + name_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + void setName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:Coprocessor) + } + + static { + defaultInstance = new Coprocessor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Coprocessor) + } + public interface NameStringPairOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -4088,6 +7277,16 @@ public final class HBaseProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionSpecifier_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionLoad_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionLoad_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ServerLoad_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ServerLoad_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_TimeRange_descriptor; private static @@ -4103,6 +7302,11 @@ public final class HBaseProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ServerName_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Coprocessor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Coprocessor_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_NameStringPair_descriptor; private static @@ -4129,19 +7333,35 @@ public final class HBaseProtos { " \002(\0162$.RegionSpecifier.RegionSpecifierTy" + "pe\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierType" + 
"\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME" + - "\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(" + - "\004\"w\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002" + - "(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001(\004", - "\022\031\n\007keyType\030\005 \001(\0162\010.KeyType\022\r\n\005value\030\006 \001" + - "(\014\"?\n\nServerName\022\020\n\010hostName\030\001 \002(\t\022\014\n\004po" + - "rt\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004\"-\n\016NameStrin" + - "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" + - "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014" + - "*_\n\007KeyType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DEL" + - "ETE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FAMIL" + - "Y\020\016\022\014\n\007MAXIMUM\020\377\001B>\n*org.apache.hadoop.h" + - "base.protobuf.generatedB\013HBaseProtosH\001\240\001" + - "\001" + "\020\002\"\270\003\n\nRegionLoad\022)\n\017regionSpecifier\030\001 \002" + + "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" + + "storefiles\030\003 \001(\r\022\037\n\027storeUncompressedSiz", + "eMB\030\004 \001(\r\022\027\n\017storefileSizeMB\030\005 \001(\r\022\026\n\016me" + + "mstoreSizeMB\030\006 \001(\r\022\034\n\024storefileIndexSize" + + "MB\030\007 \001(\r\022\031\n\021readRequestsCount\030\010 \001(\004\022\032\n\022w" + + "riteRequestsCount\030\t \001(\004\022\032\n\022totalCompacti" + + "ngKVs\030\n \001(\004\022\033\n\023currentCompactedKVs\030\013 \001(\004" + + "\022\027\n\017rootIndexSizeKB\030\014 \001(\r\022\036\n\026totalStatic" + + "IndexSizeKB\030\r \001(\r\022\036\n\026totalStaticBloomSiz" + + "eKB\030\016 \001(\r\022\"\n\014coprocessors\030\017 \003(\0132\014.Coproc" + + "essor\"\263\001\n\nServerLoad\022\031\n\021requestsPerSecon" + + "d\030\001 \001(\r\022\035\n\025totalNumberOfRequests\030\002 \001(\r\022\022", + "\n\nusedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapMB\030\004 \001(\r\022 \n" + + "\013regionLoads\030\005 \003(\0132\013.RegionLoad\022\"\n\014copro" + + "cessors\030\006 \003(\0132\014.Coprocessor\"%\n\tTimeRange" + + "\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"w\n\010KeyValue\022\013" + + "\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqualifier" + + "\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001(\004\022\031\n\007keyType\030\005 \001" + + "(\0162\010.KeyType\022\r\n\005value\030\006 \001(\014\"?\n\nServerNam" + + "e\022\020\n\010hostName\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\021\n\tsta" + + "rtCode\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(" + + "\t\"-\n\016NameStringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005val", + "ue\030\002 \002(\t\",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022" + + "\r\n\005value\030\002 \001(\014*_\n\007KeyType\022\013\n\007MINIMUM\020\000\022\007" + + "\n\003PUT\020\004\022\n\n\006DELETE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021" + + "\n\rDELETE_FAMILY\020\016\022\014\n\007MAXIMUM\020\377\001B>\n*org.a" + + "pache.hadoop.hbase.protobuf.generatedB\013H" + + "BaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -4164,8 +7384,24 @@ public final class 
HBaseProtos { new java.lang.String[] { "Type", "Value", }, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder.class); - internal_static_TimeRange_descriptor = + internal_static_RegionLoad_descriptor = getDescriptor().getMessageTypes().get(2); + internal_static_RegionLoad_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionLoad_descriptor, + new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "Coprocessors", }, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.class, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder.class); + internal_static_ServerLoad_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_ServerLoad_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ServerLoad_descriptor, + new java.lang.String[] { "RequestsPerSecond", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", }, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.class, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder.class); + internal_static_TimeRange_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_TimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TimeRange_descriptor, @@ -4173,7 +7409,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder.class); internal_static_KeyValue_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(5); internal_static_KeyValue_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_KeyValue_descriptor, @@ -4181,15 +7417,23 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.KeyValue.Builder.class); internal_static_ServerName_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(6); internal_static_ServerName_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerName_descriptor, new java.lang.String[] { "HostName", "Port", "StartCode", }, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder.class); + internal_static_Coprocessor_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_Coprocessor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Coprocessor_descriptor, + new java.lang.String[] { "Name", }, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.class, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder.class); internal_static_NameStringPair_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(8); 
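+          // getMessageTypes() indices follow declaration order in hbase.proto,
+          // so inserting RegionLoad (2), ServerLoad (3) and Coprocessor (7)
+          // shifts every later message; that is why the existing get(n) calls
+          // are renumbered above. Reconstructed from the generated accessors
+          // (src/main/protobuf/hbase.proto in this patch is authoritative),
+          // the new messages are:
+          //
+          //   message ServerLoad {
+          //     optional uint32 requestsPerSecond = 1;
+          //     optional uint32 totalNumberOfRequests = 2;
+          //     optional uint32 usedHeapMB = 3;
+          //     optional uint32 maxHeapMB = 4;
+          //     repeated RegionLoad regionLoads = 5;
+          //     repeated Coprocessor coprocessors = 6;
+          //   }
+          //
+          //   message Coprocessor {
+          //     required string name = 1;
+          //   }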
internal_static_NameStringPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameStringPair_descriptor, @@ -4197,7 +7441,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder.class); internal_static_NameBytesPair_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(9); internal_static_NameBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameBytesPair_descriptor, diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java new file mode 100644 index 00000000000..2e2ad894b2f --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java @@ -0,0 +1,3427 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: RegionServerStatus.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class RegionServerStatusProtos { + private RegionServerStatusProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface RegionServerStartupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 port = 1; + boolean hasPort(); + int getPort(); + + // required uint64 serverStartCode = 2; + boolean hasServerStartCode(); + long getServerStartCode(); + + // required uint64 serverCurrentTime = 3; + boolean hasServerCurrentTime(); + long getServerCurrentTime(); + } + public static final class RegionServerStartupRequest extends + com.google.protobuf.GeneratedMessage + implements RegionServerStartupRequestOrBuilder { + // Use RegionServerStartupRequest.newBuilder() to construct. 
+ private RegionServerStartupRequest(Builder builder) { + super(builder); + } + private RegionServerStartupRequest(boolean noInit) {} + + private static final RegionServerStartupRequest defaultInstance; + public static RegionServerStartupRequest getDefaultInstance() { + return defaultInstance; + } + + public RegionServerStartupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_fieldAccessorTable; + } + + private int bitField0_; + // required uint32 port = 1; + public static final int PORT_FIELD_NUMBER = 1; + private int port_; + public boolean hasPort() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getPort() { + return port_; + } + + // required uint64 serverStartCode = 2; + public static final int SERVERSTARTCODE_FIELD_NUMBER = 2; + private long serverStartCode_; + public boolean hasServerStartCode() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getServerStartCode() { + return serverStartCode_; + } + + // required uint64 serverCurrentTime = 3; + public static final int SERVERCURRENTTIME_FIELD_NUMBER = 3; + private long serverCurrentTime_; + public boolean hasServerCurrentTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getServerCurrentTime() { + return serverCurrentTime_; + } + + private void initFields() { + port_ = 0; + serverStartCode_ = 0L; + serverCurrentTime_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPort()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasServerStartCode()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasServerCurrentTime()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, port_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, serverStartCode_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, serverCurrentTime_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, port_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, serverStartCode_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, serverCurrentTime_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected 
java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest) obj; + + boolean result = true; + result = result && (hasPort() == other.hasPort()); + if (hasPort()) { + result = result && (getPort() + == other.getPort()); + } + result = result && (hasServerStartCode() == other.hasServerStartCode()); + if (hasServerStartCode()) { + result = result && (getServerStartCode() + == other.getServerStartCode()); + } + result = result && (hasServerCurrentTime() == other.hasServerCurrentTime()); + if (hasServerCurrentTime()) { + result = result && (getServerCurrentTime() + == other.getServerCurrentTime()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPort()) { + hash = (37 * hash) + PORT_FIELD_NUMBER; + hash = (53 * hash) + getPort(); + } + if (hasServerStartCode()) { + hash = (37 * hash) + SERVERSTARTCODE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getServerStartCode()); + } + if (hasServerCurrentTime()) { + hash = (37 * hash) + SERVERCURRENTTIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getServerCurrentTime()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + serverStartCode_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + serverCurrentTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + 
getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.port_ = port_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.serverStartCode_ = serverStartCode_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.serverCurrentTime_ = serverCurrentTime_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDefaultInstance()) return this; + if (other.hasPort()) { + setPort(other.getPort()); + } + if (other.hasServerStartCode()) { + setServerStartCode(other.getServerStartCode()); + } + if (other.hasServerCurrentTime()) { + setServerCurrentTime(other.getServerCurrentTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPort()) { + + return false; + } + if (!hasServerStartCode()) { + + return false; + } + if (!hasServerCurrentTime()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + 
this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + port_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + serverStartCode_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + serverCurrentTime_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required uint32 port = 1; + private int port_ ; + public boolean hasPort() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getPort() { + return port_; + } + public Builder setPort(int value) { + bitField0_ |= 0x00000001; + port_ = value; + onChanged(); + return this; + } + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000001); + port_ = 0; + onChanged(); + return this; + } + + // required uint64 serverStartCode = 2; + private long serverStartCode_ ; + public boolean hasServerStartCode() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getServerStartCode() { + return serverStartCode_; + } + public Builder setServerStartCode(long value) { + bitField0_ |= 0x00000002; + serverStartCode_ = value; + onChanged(); + return this; + } + public Builder clearServerStartCode() { + bitField0_ = (bitField0_ & ~0x00000002); + serverStartCode_ = 0L; + onChanged(); + return this; + } + + // required uint64 serverCurrentTime = 3; + private long serverCurrentTime_ ; + public boolean hasServerCurrentTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getServerCurrentTime() { + return serverCurrentTime_; + } + public Builder setServerCurrentTime(long value) { + bitField0_ |= 0x00000004; + serverCurrentTime_ = value; + onChanged(); + return this; + } + public Builder clearServerCurrentTime() { + bitField0_ = (bitField0_ & ~0x00000004); + serverCurrentTime_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RegionServerStartupRequest) + } + + static { + defaultInstance = new RegionServerStartupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionServerStartupRequest) + } + + public interface RegionServerStartupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .NameStringPair mapEntries = 1; + java.util.List + getMapEntriesList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getMapEntries(int index); + int getMapEntriesCount(); + java.util.List + getMapEntriesOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getMapEntriesOrBuilder( + int index); + } + public static final class RegionServerStartupResponse extends + com.google.protobuf.GeneratedMessage + implements RegionServerStartupResponseOrBuilder { + // Use RegionServerStartupResponse.newBuilder() to construct. 
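+  // Together with RegionServerStartupRequest above, this message forms the
+  // region server startup handshake: the region server reports its port,
+  // start code and clock, and the master answers with NameStringPair entries
+  // (for example, configuration the region server should apply). A
+  // client-side sketch, assuming NameStringPair exposes the usual generated
+  // getName()/getValue() accessors and that conf is supplied by the caller:
+  //
+  //   RegionServerStartupRequest req = RegionServerStartupRequest.newBuilder()
+  //       .setPort(60020)                              // illustrative values
+  //       .setServerStartCode(System.currentTimeMillis())
+  //       .setServerCurrentTime(System.currentTimeMillis())
+  //       .build();
+  //   // ... send req via RegionServerStatusProtocol, obtaining resp ...
+  //   for (HBaseProtos.NameStringPair pair : resp.getMapEntriesList()) {
+  //     conf.set(pair.getName(), pair.getValue());
+  //   }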
+ private RegionServerStartupResponse(Builder builder) { + super(builder); + } + private RegionServerStartupResponse(boolean noInit) {} + + private static final RegionServerStartupResponse defaultInstance; + public static RegionServerStartupResponse getDefaultInstance() { + return defaultInstance; + } + + public RegionServerStartupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_fieldAccessorTable; + } + + // repeated .NameStringPair mapEntries = 1; + public static final int MAPENTRIES_FIELD_NUMBER = 1; + private java.util.List mapEntries_; + public java.util.List getMapEntriesList() { + return mapEntries_; + } + public java.util.List + getMapEntriesOrBuilderList() { + return mapEntries_; + } + public int getMapEntriesCount() { + return mapEntries_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getMapEntries(int index) { + return mapEntries_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getMapEntriesOrBuilder( + int index) { + return mapEntries_.get(index); + } + + private void initFields() { + mapEntries_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getMapEntriesCount(); i++) { + if (!getMapEntries(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < mapEntries_.size(); i++) { + output.writeMessage(1, mapEntries_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < mapEntries_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, mapEntries_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse) obj; + + boolean result = true; + result = result && getMapEntriesList() + .equals(other.getMapEntriesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + 
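+      // equals() mirrors hashCode() below: two responses are equal when their
+      // mapEntries lists and unknown fields agree.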
return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getMapEntriesCount() > 0) { + hash = (37 * hash) + MAPENTRIES_FIELD_NUMBER; + hash = (53 * hash) + getMapEntriesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder 
newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerStartupResponse_fieldAccessorTable;
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getMapEntriesFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (mapEntriesBuilder_ == null) {
+          mapEntries_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          mapEntriesBuilder_.clear();
+        }
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDescriptor();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse buildPartial() {
+
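+        // Note: buildPartial() assembles the message without the required-field
+        // check; build() above is the variant that validates initialization.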
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse(this); + int from_bitField0_ = bitField0_; + if (mapEntriesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + mapEntries_ = java.util.Collections.unmodifiableList(mapEntries_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.mapEntries_ = mapEntries_; + } else { + result.mapEntries_ = mapEntriesBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance()) return this; + if (mapEntriesBuilder_ == null) { + if (!other.mapEntries_.isEmpty()) { + if (mapEntries_.isEmpty()) { + mapEntries_ = other.mapEntries_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureMapEntriesIsMutable(); + mapEntries_.addAll(other.mapEntries_); + } + onChanged(); + } + } else { + if (!other.mapEntries_.isEmpty()) { + if (mapEntriesBuilder_.isEmpty()) { + mapEntriesBuilder_.dispose(); + mapEntriesBuilder_ = null; + mapEntries_ = other.mapEntries_; + bitField0_ = (bitField0_ & ~0x00000001); + mapEntriesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+                 getMapEntriesFieldBuilder() : null;
+          } else {
+            mapEntriesBuilder_.addAllMessages(other.mapEntries_);
+          }
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getMapEntriesCount(); i++) {
+          if (!getMapEntries(i).isInitialized()) {
+
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder();
+              input.readMessage(subBuilder, extensionRegistry);
+              addMapEntries(subBuilder.buildPartial());
+              break;
+            }
+          }
+        }
+      }
+
+      private int bitField0_;
+
+      // repeated .NameStringPair mapEntries = 1;
+      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> mapEntries_ =
+        java.util.Collections.emptyList();
+      private void ensureMapEntriesIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          mapEntries_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair>(mapEntries_);
+          bitField0_ |= 0x00000001;
+         }
+      }
+
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> mapEntriesBuilder_;
+
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> getMapEntriesList() {
+        if (mapEntriesBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(mapEntries_);
+        } else {
+          return mapEntriesBuilder_.getMessageList();
+        }
+      }
+      public int getMapEntriesCount() {
+        if (mapEntriesBuilder_ == null) {
+          return mapEntries_.size();
+        } else {
+          return mapEntriesBuilder_.getCount();
+        }
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getMapEntries(int index) {
+        if (mapEntriesBuilder_ == null) {
+          return mapEntries_.get(index);
+        } else {
+          return mapEntriesBuilder_.getMessage(index);
+        }
+      }
+      public Builder setMapEntries(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
+        if (mapEntriesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureMapEntriesIsMutable();
+          mapEntries_.set(index, value);
+          onChanged();
+        } else {
+          mapEntriesBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      public Builder setMapEntries(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
+        if (mapEntriesBuilder_ == null) {
+          ensureMapEntriesIsMutable();
+          mapEntries_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          mapEntriesBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      public Builder addMapEntries(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
+        if (mapEntriesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
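+          // Copy-on-write: the backing list may still be the shared immutable
+          // default, so it is swapped for a private ArrayList before mutating.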
+          ensureMapEntriesIsMutable();
+          mapEntries_.add(value);
+          onChanged();
+        } else {
+          mapEntriesBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      public Builder addMapEntries(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
+        if (mapEntriesBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureMapEntriesIsMutable();
+          mapEntries_.add(index, value);
+          onChanged();
+        } else {
+          mapEntriesBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      public Builder addMapEntries(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
+        if (mapEntriesBuilder_ == null) {
+          ensureMapEntriesIsMutable();
+          mapEntries_.add(builderForValue.build());
+          onChanged();
+        } else {
+          mapEntriesBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      public Builder addMapEntries(
+          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
+        if (mapEntriesBuilder_ == null) {
+          ensureMapEntriesIsMutable();
+          mapEntries_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          mapEntriesBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      public Builder addAllMapEntries(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> values) {
+        if (mapEntriesBuilder_ == null) {
+          ensureMapEntriesIsMutable();
+          super.addAll(values, mapEntries_);
+          onChanged();
+        } else {
+          mapEntriesBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      public Builder clearMapEntries() {
+        if (mapEntriesBuilder_ == null) {
+          mapEntries_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          mapEntriesBuilder_.clear();
+        }
+        return this;
+      }
+      public Builder removeMapEntries(int index) {
+        if (mapEntriesBuilder_ == null) {
+          ensureMapEntriesIsMutable();
+          mapEntries_.remove(index);
+          onChanged();
+        } else {
+          mapEntriesBuilder_.remove(index);
+        }
+        return this;
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getMapEntriesBuilder(
+          int index) {
+        return getMapEntriesFieldBuilder().getBuilder(index);
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getMapEntriesOrBuilder(
+          int index) {
+        if (mapEntriesBuilder_ == null) {
+          return mapEntries_.get(index);
+        } else {
+          return mapEntriesBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>
+           getMapEntriesOrBuilderList() {
+        if (mapEntriesBuilder_ != null) {
+          return mapEntriesBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(mapEntries_);
+        }
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addMapEntriesBuilder() {
+        return getMapEntriesFieldBuilder().addBuilder(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance());
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addMapEntriesBuilder(
+          int index) {
+        return getMapEntriesFieldBuilder().addBuilder(
+          index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance());
+      }
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder>
+           getMapEntriesBuilderList() {
+        return getMapEntriesFieldBuilder().getBuilderList();
+      }
+      private com.google.protobuf.RepeatedFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> + getMapEntriesFieldBuilder() { + if (mapEntriesBuilder_ == null) { + mapEntriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( + mapEntries_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + mapEntries_ = null; + } + return mapEntriesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegionServerStartupResponse) + } + + static { + defaultInstance = new RegionServerStartupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionServerStartupResponse) + } + + public interface RegionServerReportRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ServerName server = 1; + boolean hasServer(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); + + // optional .ServerLoad load = 2; + boolean hasLoad(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder(); + } + public static final class RegionServerReportRequest extends + com.google.protobuf.GeneratedMessage + implements RegionServerReportRequestOrBuilder { + // Use RegionServerReportRequest.newBuilder() to construct. + private RegionServerReportRequest(Builder builder) { + super(builder); + } + private RegionServerReportRequest(boolean noInit) {} + + private static final RegionServerReportRequest defaultInstance; + public static RegionServerReportRequest getDefaultInstance() { + return defaultInstance; + } + + public RegionServerReportRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_fieldAccessorTable; + } + + private int bitField0_; + // required .ServerName server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + return server_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + return server_; + } + + // optional .ServerLoad load = 2; + public static final int LOAD_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad load_; + public boolean hasLoad() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad() { + return load_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder() { + return load_; + } + 
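+    // Illustrative usage sketch (comment only, not generated code); the
+    // variable names serverName and serverLoad below are placeholders:
+    //   RegionServerReportRequest.newBuilder()
+    //       .setServer(serverName)   // required .ServerName
+    //       .setLoad(serverLoad)     // optional .ServerLoad
+    //       .build();
+    // build() fails if the required server field is unset; load may be omitted.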
+ private void initFields() { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasLoad()) { + if (!getLoad().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, server_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, load_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, server_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, load_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && (hasLoad() == other.hasLoad()); + if (hasLoad()) { + result = result && getLoad() + .equals(other.getLoad()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + if (hasLoad()) { + hash = (37 * hash) + LOAD_FIELD_NUMBER; + hash = (53 * hash) + getLoad().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom( + com.google.protobuf.ByteString data, 
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+
getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerFieldBuilder(); + getLoadFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (loadBuilder_ == null) { + load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance(); + } else { + loadBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (serverBuilder_ == null) { + result.server_ = server_; + } else { + result.server_ = serverBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (loadBuilder_ == null) { + result.load_ = load_; + } else { + result.load_ = 
loadBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance()) return this; + if (other.hasServer()) { + mergeServer(other.getServer()); + } + if (other.hasLoad()) { + mergeLoad(other.getLoad()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; + } + if (!getServer().isInitialized()) { + + return false; + } + if (hasLoad()) { + if (!getLoad().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); + if (hasServer()) { + subBuilder.mergeFrom(getServer()); + } + input.readMessage(subBuilder, extensionRegistry); + setServer(subBuilder.buildPartial()); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder(); + if (hasLoad()) { + subBuilder.mergeFrom(getLoad()); + } + input.readMessage(subBuilder, extensionRegistry); + setLoad(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .ServerName server = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + if (serverBuilder_ == null) { + return server_; + } else { + return serverBuilder_.getMessage(); + } + } + public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + server_ = 
value; + onChanged(); + } else { + serverBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setServer( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverBuilder_ == null) { + server_ = builderForValue.build(); + onChanged(); + } else { + serverBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + server_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); + } else { + server_ = value; + } + onChanged(); + } else { + serverBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearServer() { + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + if (serverBuilder_ != null) { + return serverBuilder_.getMessageOrBuilder(); + } else { + return server_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerFieldBuilder() { + if (serverBuilder_ == null) { + serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + server_, + getParentForChildren(), + isClean()); + server_ = null; + } + return serverBuilder_; + } + + // optional .ServerLoad load = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder> loadBuilder_; + public boolean hasLoad() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad() { + if (loadBuilder_ == null) { + return load_; + } else { + return loadBuilder_.getMessage(); + } + } + public Builder setLoad(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad value) { + if (loadBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + load_ = value; + onChanged(); + } else { + loadBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setLoad( + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder builderForValue) { + if (loadBuilder_ == null) { + load_ = builderForValue.build(); + onChanged(); + } else { + loadBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeLoad(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad value) { + if (loadBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + load_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance()) { + load_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial(); + } else { + load_ = value; + } + onChanged(); + } else { + loadBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearLoad() { + if (loadBuilder_ == null) { + load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance(); + onChanged(); + } else { + loadBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder getLoadBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getLoadFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder() { + if (loadBuilder_ != null) { + return loadBuilder_.getMessageOrBuilder(); + } else { + return load_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder> + getLoadFieldBuilder() { + if (loadBuilder_ == null) { + loadBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder>( + load_, + getParentForChildren(), + isClean()); + load_ = null; + } + return loadBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegionServerReportRequest) + } + + static { + defaultInstance = new RegionServerReportRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionServerReportRequest) + } + + public interface RegionServerReportResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class RegionServerReportResponse extends + com.google.protobuf.GeneratedMessage + implements RegionServerReportResponseOrBuilder { + // Use RegionServerReportResponse.newBuilder() to construct. 
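+    // This response message carries no fields; keeping a typed (if empty)
+    // message rather than a bare ack leaves room to add response data later
+    // without changing the RPC signature.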
+ private RegionServerReportResponse(Builder builder) { + super(builder); + } + private RegionServerReportResponse(boolean noInit) {} + + private static final RegionServerReportResponse defaultInstance; + public static RegionServerReportResponse getDefaultInstance() { + return defaultInstance; + } + + public RegionServerReportResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_RegionServerReportResponse_fieldAccessorTable;
+      }
+
+      // Construct using
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + 
extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:RegionServerReportResponse) + } + + static { + defaultInstance = new RegionServerReportResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionServerReportResponse) + } + + public interface ReportRSFatalErrorRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ServerName server = 1; + boolean hasServer(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); + + // required string errorMessage = 2; + boolean hasErrorMessage(); + String getErrorMessage(); + } + public static final class ReportRSFatalErrorRequest extends + com.google.protobuf.GeneratedMessage + implements ReportRSFatalErrorRequestOrBuilder { + // Use ReportRSFatalErrorRequest.newBuilder() to construct. + private ReportRSFatalErrorRequest(Builder builder) { + super(builder); + } + private ReportRSFatalErrorRequest(boolean noInit) {} + + private static final ReportRSFatalErrorRequest defaultInstance; + public static ReportRSFatalErrorRequest getDefaultInstance() { + return defaultInstance; + } + + public ReportRSFatalErrorRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_fieldAccessorTable; + } + + private int bitField0_; + // required .ServerName server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + return server_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + return server_; + } + + // required string errorMessage = 2; + public static final int ERRORMESSAGE_FIELD_NUMBER = 2; + private java.lang.Object errorMessage_; + public boolean hasErrorMessage() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + errorMessage_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + errorMessage_ = ""; + } + private byte 
memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasErrorMessage()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, server_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getErrorMessageBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, server_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getErrorMessageBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && (hasErrorMessage() == other.hasErrorMessage()); + if (hasErrorMessage()) { + result = result && getErrorMessage() + .equals(other.getErrorMessage()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + if (hasErrorMessage()) { + hash = (37 * hash) + ERRORMESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getErrorMessage().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + 
.buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_descriptor;
+      }
+
+      protected
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + errorMessage_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (serverBuilder_ == null) { + result.server_ = server_; + } else { + result.server_ = serverBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.errorMessage_ = errorMessage_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)other); + } 
else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance()) return this; + if (other.hasServer()) { + mergeServer(other.getServer()); + } + if (other.hasErrorMessage()) { + setErrorMessage(other.getErrorMessage()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; + } + if (!hasErrorMessage()) { + + return false; + } + if (!getServer().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); + if (hasServer()) { + subBuilder.mergeFrom(getServer()); + } + input.readMessage(subBuilder, extensionRegistry); + setServer(subBuilder.buildPartial()); + break; + } + case 18: { + bitField0_ |= 0x00000002; + errorMessage_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required .ServerName server = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + if (serverBuilder_ == null) { + return server_; + } else { + return serverBuilder_.getMessage(); + } + } + public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + server_ = value; + onChanged(); + } else { + serverBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setServer( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverBuilder_ == null) { + server_ = builderForValue.build(); + onChanged(); + } else { + serverBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + server_ != 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + server_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); + } else { + server_ = value; + } + onChanged(); + } else { + serverBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearServer() { + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + if (serverBuilder_ != null) { + return serverBuilder_.getMessageOrBuilder(); + } else { + return server_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerFieldBuilder() { + if (serverBuilder_ == null) { + serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + server_, + getParentForChildren(), + isClean()); + server_ = null; + } + return serverBuilder_; + } + + // required string errorMessage = 2; + private java.lang.Object errorMessage_ = ""; + public boolean hasErrorMessage() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getErrorMessage() { + java.lang.Object ref = errorMessage_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + errorMessage_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setErrorMessage(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + errorMessage_ = value; + onChanged(); + return this; + } + public Builder clearErrorMessage() { + bitField0_ = (bitField0_ & ~0x00000002); + errorMessage_ = getDefaultInstance().getErrorMessage(); + onChanged(); + return this; + } + void setErrorMessage(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + errorMessage_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:ReportRSFatalErrorRequest) + } + + static { + defaultInstance = new ReportRSFatalErrorRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReportRSFatalErrorRequest) + } + + public interface ReportRSFatalErrorResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class ReportRSFatalErrorResponse extends + com.google.protobuf.GeneratedMessage + implements ReportRSFatalErrorResponseOrBuilder { + // Use ReportRSFatalErrorResponse.newBuilder() to construct. 
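+ // Note (descriptive, not in protoc's output): this message deliberately has
+ // no fields; a ReportRSFatalErrorResponse is only a typed acknowledgement
+ // that the fatal-error report was received.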
+ private ReportRSFatalErrorResponse(Builder builder) { + super(builder); + } + private ReportRSFatalErrorResponse(boolean noInit) {} + + private static final ReportRSFatalErrorResponse defaultInstance; + public static ReportRSFatalErrorResponse getDefaultInstance() { + return defaultInstance; + } + + public ReportRSFatalErrorResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_ReportRSFatalErrorResponse_fieldAccessorTable; + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + 
extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:ReportRSFatalErrorResponse) + } + + static { + defaultInstance = new ReportRSFatalErrorResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReportRSFatalErrorResponse) + } + + public static abstract class RegionServerStatusService + implements com.google.protobuf.Service { + protected RegionServerStatusService() {} + + public interface Interface { + public abstract void regionServerStartup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void regionServerReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void reportRSFatalError( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new RegionServerStatusService() { + @java.lang.Override + public void regionServerStartup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request, + com.google.protobuf.RpcCallback done) { + impl.regionServerStartup(controller, request, done); + } + + @java.lang.Override + public void regionServerReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request, + com.google.protobuf.RpcCallback done) { + impl.regionServerReport(controller, request, done); + } + + @java.lang.Override + public void reportRSFatalError( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request, + com.google.protobuf.RpcCallback done) { + impl.reportRSFatalError(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.regionServerStartup(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest)request); + case 1: + return impl.regionServerReport(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)request); + case 2: + return impl.reportRSFatalError(controller, 
(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + public abstract void regionServerStartup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void regionServerReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void reportRSFatalError( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.regionServerStartup(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest)request, + 
com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.regionServerReport(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.reportRSFatalError(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void regionServerStartup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance())); + } + + public void regionServerReport( + com.google.protobuf.RpcController 
controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance())); + } + + public void reportRSFatalError( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse regionServerStartup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse regionServerReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse reportRSFatalError( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse regionServerStartup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse 
regionServerReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse reportRSFatalError( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance()); + } + + } + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionServerStartupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionServerStartupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionServerStartupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionServerStartupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionServerReportRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionServerReportRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionServerReportResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionServerReportResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReportRSFatalErrorRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReportRSFatalErrorRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReportRSFatalErrorResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReportRSFatalErrorResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\030RegionServerStatus.proto\032\013hbase.proto\"" + + "^\n\032RegionServerStartupRequest\022\014\n\004port\030\001 " + + "\002(\r\022\027\n\017serverStartCode\030\002 \002(\004\022\031\n\021serverCu" + + "rrentTime\030\003 \002(\004\"B\n\033RegionServerStartupRe" + + "sponse\022#\n\nmapEntries\030\001 \003(\0132\017.NameStringP" + + "air\"S\n\031RegionServerReportRequest\022\033\n\006serv" + + "er\030\001 \002(\0132\013.ServerName\022\031\n\004load\030\002 \001(\0132\013.Se" + + "rverLoad\"\034\n\032RegionServerReportResponse\"N" + + 
"\n\031ReportRSFatalErrorRequest\022\033\n\006server\030\001 " + + "\002(\0132\013.ServerName\022\024\n\014errorMessage\030\002 \002(\t\"\034", + "\n\032ReportRSFatalErrorResponse2\213\002\n\031RegionS" + + "erverStatusService\022P\n\023regionServerStartu" + + "p\022\033.RegionServerStartupRequest\032\034.RegionS" + + "erverStartupResponse\022M\n\022regionServerRepo" + + "rt\022\032.RegionServerReportRequest\032\033.RegionS" + + "erverReportResponse\022M\n\022reportRSFatalErro" + + "r\022\032.ReportRSFatalErrorRequest\032\033.ReportRS" + + "FatalErrorResponseBN\n*org.apache.hadoop." + + "hbase.protobuf.generatedB\030RegionServerSt" + + "atusProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_RegionServerStartupRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_RegionServerStartupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionServerStartupRequest_descriptor, + new java.lang.String[] { "Port", "ServerStartCode", "ServerCurrentTime", }, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.Builder.class); + internal_static_RegionServerStartupResponse_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_RegionServerStartupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionServerStartupResponse_descriptor, + new java.lang.String[] { "MapEntries", }, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder.class); + internal_static_RegionServerReportRequest_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_RegionServerReportRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionServerReportRequest_descriptor, + new java.lang.String[] { "Server", "Load", }, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.Builder.class); + internal_static_RegionServerReportResponse_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_RegionServerReportResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionServerReportResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.Builder.class); + internal_static_ReportRSFatalErrorRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_ReportRSFatalErrorRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReportRSFatalErrorRequest_descriptor, + new java.lang.String[] { "Server", 
"ErrorMessage", }, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.Builder.class); + internal_static_ReportRSFatalErrorResponse_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_ReportRSFatalErrorResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReportRSFatalErrorResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 6cd64871da1..96ac8bd8c3e 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -67,7 +67,6 @@ import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HServerInfo; -import org.apache.hadoop.hbase.HServerLoad; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotServingRegionException; @@ -112,7 +111,6 @@ import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; import org.apache.hadoop.hbase.ipc.HBaseRpcMetrics; -import org.apache.hadoop.hbase.ipc.HMasterRegionInterface; import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.ipc.Invocation; import org.apache.hadoop.hbase.ipc.ProtocolSignature; @@ -147,7 +145,6 @@ import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.io.MapWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.metrics.util.MBeanUtil; @@ -156,9 +153,24 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; import org.codehaus.jackson.map.ObjectMapper; +import com.google.protobuf.ServiceException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; +import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; +import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol; import com.google.common.base.Function; import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; /** * HRegionServer makes a set of HRegions available to clients. It checks in with @@ -191,7 +203,7 @@ public class HRegionServer extends RegionServer protected final int numRegionsToReport; // Remote HMaster - private HMasterRegionInterface hbaseMaster; + private RegionServerStatusProtocol hbaseMaster; // Server to handle client requests. Default access so can be accessed by // unit tests. @@ -589,7 +601,7 @@ public class HRegionServer extends RegionServer // Try and register with the Master; tell it we are here. Break if // server is stopped or the clusterup flag is down or hdfs went wacky. while (keepLooping()) { - MapWritable w = reportForDuty(); + RegionServerStartupResponse w = reportForDuty(); if (w == null) { LOG.warn("reportForDuty failed; sleeping and then retrying."); this.sleeper.sleep(); @@ -737,15 +749,18 @@ public class HRegionServer extends RegionServer void tryRegionServerReport() throws IOException { - HServerLoad hsl = buildServerLoad(); + HBaseProtos.ServerLoad sl = buildServerLoad(); // Why we do this? this.requestCount.set(0); try { - this.hbaseMaster.regionServerReport(this.serverNameFromMasterPOV.getVersionedBytes(), hsl); - } catch (IOException ioe) { - if (ioe instanceof RemoteException) { - ioe = ((RemoteException)ioe).unwrapRemoteException(); - } + RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder(); + ServerName sn = ServerName.parseVersionedServerName( + this.serverNameFromMasterPOV.getVersionedBytes()); + request.setServer(ProtobufUtil.toServerName(sn)); + request.setLoad(sl); + this.hbaseMaster.regionServerReport(null, request.build()); + } catch (ServiceException se) { + IOException ioe = ProtobufUtil.getRemoteException(se); if (ioe instanceof YouAreDeadException) { // This will be caught and handled as a fatal error in run() throw ioe; @@ -756,19 +771,26 @@ public class HRegionServer extends RegionServer } } - HServerLoad buildServerLoad() { + HBaseProtos.ServerLoad buildServerLoad() { Collection regions = getOnlineRegionsLocalContext(); - TreeMap regionLoads = - new TreeMap(Bytes.BYTES_COMPARATOR); - for (HRegion region: regions) { - regionLoads.put(region.getRegionName(), createRegionLoad(region)); - } MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); - return new HServerLoad(requestCount.get(),(int)metrics.getRequests(), - (int)(memory.getUsed() / 1024 / 1024), - (int) (memory.getMax() / 1024 / 1024), regionLoads, - this.hlog.getCoprocessorHost().getCoprocessors()); + + HBaseProtos.ServerLoad.Builder serverLoad = HBaseProtos.ServerLoad.newBuilder(); + serverLoad.setRequestsPerSecond((int)metrics.getRequests()); + serverLoad.setTotalNumberOfRequests(requestCount.get()); + serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024)); + serverLoad.setMaxHeapMB((int) (memory.getMax() / 1024 / 1024)); + Set coprocessors = this.hlog.getCoprocessorHost().getCoprocessors(); + for (String coprocessor : coprocessors) { + 
serverLoad.addCoprocessors( + Coprocessor.newBuilder().setName(coprocessor).build()); + } + for (HRegion region : regions) { + serverLoad.addRegionLoads(createRegionLoad(region)); + } + + return serverLoad.build(); } String getOnlineRegionsAsPrintableString() { @@ -858,14 +880,14 @@ public class HRegionServer extends RegionServer * * @param c Extra configuration. */ - protected void handleReportForDutyResponse(final MapWritable c) + protected void handleReportForDutyResponse(final RegionServerStartupResponse c) throws IOException { try { - for (Map.Entry e :c.entrySet()) { - String key = e.getKey().toString(); + for (NameStringPair e : c.getMapEntriesList()) { + String key = e.getName(); // The hostname the master sees us as. if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) { - String hostnameFromMasterPOV = e.getValue().toString(); + String hostnameFromMasterPOV = e.getValue(); this.serverNameFromMasterPOV = new ServerName(hostnameFromMasterPOV, this.isa.getPort(), this.startcode); LOG.info("Master passed us hostname to use. Was=" + @@ -943,7 +965,7 @@ public class HRegionServer extends RegionServer * * @throws IOException */ - private HServerLoad.RegionLoad createRegionLoad(final HRegion r) { + private RegionLoad createRegionLoad(final HRegion r) { byte[] name = r.getRegionName(); int stores = 0; int storefiles = 0; @@ -980,20 +1002,38 @@ public class HRegionServer extends RegionServer (int) (store.getTotalStaticBloomSize() / 1024); } } - return new HServerLoad.RegionLoad(name, stores, storefiles, - storeUncompressedSizeMB, - storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB, rootIndexSizeKB, - totalStaticIndexSizeKB, totalStaticBloomSizeKB, - (int) r.readRequestsCount.get(), (int) r.writeRequestsCount.get(), - totalCompactingKVs, currentCompactedKVs, - r.getCoprocessorHost().getCoprocessors()); + RegionLoad.Builder regionLoad = RegionLoad.newBuilder(); + RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder(); + regionSpecifier.setType(RegionSpecifierType.REGION_NAME); + regionSpecifier.setValue(ByteString.copyFrom(name)); + regionLoad.setRegionSpecifier(regionSpecifier.build()) + .setStores(stores) + .setStorefiles(storefiles) + .setStoreUncompressedSizeMB(storeUncompressedSizeMB) + .setStorefileSizeMB(storefileSizeMB) + .setMemstoreSizeMB(memstoreSizeMB) + .setStorefileIndexSizeMB(storefileIndexSizeMB) + .setRootIndexSizeKB(rootIndexSizeKB) + .setTotalStaticIndexSizeKB(totalStaticIndexSizeKB) + .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB) + .setReadRequestsCount((int) r.readRequestsCount.get()) + .setWriteRequestsCount((int) r.writeRequestsCount.get()) + .setTotalCompactingKVs(totalCompactingKVs) + .setCurrentCompactedKVs(currentCompactedKVs); + Set coprocessors = r.getCoprocessorHost().getCoprocessors(); + for (String coprocessor : coprocessors) { + regionLoad.addCoprocessors( + Coprocessor.newBuilder().setName(coprocessor).build()); + } + + return regionLoad.build(); } /** * @param encodedRegionName * @return An instance of RegionLoad. */ - public HServerLoad.RegionLoad createRegionLoad(final String encodedRegionName) { + public RegionLoad createRegionLoad(final String encodedRegionName) { HRegion r = null; r = this.onlineRegions.get(encodedRegionName); return r != null ? 
createRegionLoad(r) : null; @@ -1507,8 +1547,14 @@ public class HRegionServer extends RegionServer msg += "\nCause:\n" + StringUtils.stringifyException(cause); } if (hbaseMaster != null) { + ReportRSFatalErrorRequest.Builder builder = + ReportRSFatalErrorRequest.newBuilder(); + ServerName sn = + ServerName.parseVersionedServerName(this.serverNameFromMasterPOV.getVersionedBytes()); + builder.setServer(ProtobufUtil.toServerName(sn)); + builder.setErrorMessage(msg); hbaseMaster.reportRSFatalError( - this.serverNameFromMasterPOV.getVersionedBytes(), msg); + null,builder.build()); } } catch (Throwable t) { LOG.warn("Unable to report fatal error to master", t); @@ -1588,7 +1634,7 @@ public class HRegionServer extends RegionServer private ServerName getMaster() { ServerName masterServerName = null; long previousLogTime = 0; - HMasterRegionInterface master = null; + RegionServerStatusProtocol master = null; boolean refresh = false; // for the first time, use cached data while (keepLooping() && master == null) { masterServerName = this.masterAddressManager.getMasterAddress(refresh); @@ -1614,8 +1660,8 @@ public class HRegionServer extends RegionServer try { // Do initial RPC setup. The final argument indicates that the RPC // should retry indefinitely. - master = (HMasterRegionInterface) HBaseRPC.waitForProxy( - HMasterRegionInterface.class, HMasterRegionInterface.VERSION, + master = (RegionServerStatusProtocol) HBaseRPC.waitForProxy( + RegionServerStatusProtocol.class, RegionServerStatusProtocol.VERSION, isa, this.conf, -1, this.rpcTimeout, this.rpcTimeout); } catch (IOException e) { @@ -1658,8 +1704,8 @@ public class HRegionServer extends RegionServer * null if we failed to register. * @throws IOException */ - private MapWritable reportForDuty() throws IOException { - MapWritable result = null; + private RegionServerStartupResponse reportForDuty() throws IOException { + RegionServerStartupResponse result = null; ServerName masterServerName = getMaster(); if (masterServerName == null) return result; try { @@ -1668,18 +1714,20 @@ public class HRegionServer extends RegionServer "with port=" + this.isa.getPort() + ", startcode=" + this.startcode); long now = EnvironmentEdgeManager.currentTimeMillis(); int port = this.isa.getPort(); - result = this.hbaseMaster.regionServerStartup(port, this.startcode, now); - } catch (RemoteException e) { - IOException ioe = e.unwrapRemoteException(); + RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); + request.setPort(port); + request.setServerStartCode(this.startcode); + request.setServerCurrentTime(now); + result = this.hbaseMaster.regionServerStartup(null, request.build()); + } catch (ServiceException se) { + IOException ioe = ProtobufUtil.getRemoteException(se); if (ioe instanceof ClockOutOfSyncException) { LOG.fatal("Master rejected startup because clock is out of sync", ioe); // Re-throw IOE will cause RS to abort throw ioe; } else { - LOG.warn("remote error telling master we are up", e); + LOG.warn("error telling master we are up", se); } - } catch (IOException e) { - LOG.warn("error telling master we are up", e); } return result; } @@ -3295,8 +3343,9 @@ public class HRegionServer extends RegionServer // used by org/apache/hbase/tmpl/regionserver/RSStatusTmpl.jamon (HBASE-4070). public String[] getCoprocessors() { - HServerLoad hsl = buildServerLoad(); - return hsl == null? null: hsl.getCoprocessors(); + HBaseProtos.ServerLoad sl = buildServerLoad(); + return sl == null? 
null:
+      ServerLoad.getRegionServerCoprocessors(new ServerLoad(sl));
   }
 
   /**
diff --git a/src/main/protobuf/RegionServerStatus.proto b/src/main/protobuf/RegionServerStatus.proto
new file mode 100644
index 00000000000..9d7728f7b58
--- /dev/null
+++ b/src/main/protobuf/RegionServerStatus.proto
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are used for the
+// RegionServerStatus protocol (region server to master).
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "RegionServerStatusProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "hbase.proto";
+
+message RegionServerStartupRequest {
+  /** Port number this regionserver is up on */
+  required uint32 port = 1;
+
+  /** This server's startcode */
+  required uint64 serverStartCode = 2;
+
+  /** Current time of the region server in ms */
+  required uint64 serverCurrentTime = 3;
+}
+
+message RegionServerStartupResponse {
+  /**
+   * Configuration for the regionserver to use: e.g. filesystem,
+   * hbase rootdir, the hostname to use when creating the RegionServer
+   * ServerName, etc.
+   */
+  repeated NameStringPair mapEntries = 1;
+}
+
+message RegionServerReportRequest {
+  required ServerName server = 1;
+
+  /** load the server is under */
+  optional ServerLoad load = 2;
+}
+
+message RegionServerReportResponse {
+}
+
+message ReportRSFatalErrorRequest {
+  /** name of the server experiencing the error */
+  required ServerName server = 1;
+
+  /** informative text to expose in the master logs and UI */
+  required string errorMessage = 2;
+}
+
+message ReportRSFatalErrorResponse {
+}
+
+service RegionServerStatusService {
+  /** Called when a region server first starts. */
+  rpc regionServerStartup(RegionServerStartupRequest)
+    returns(RegionServerStartupResponse);
+
+  /** Called to report the load the RegionServer is under. */
+  rpc regionServerReport(RegionServerReportRequest)
+    returns(RegionServerReportResponse);
+
+  /**
+   * Called by a region server to report a fatal error that is causing it to
+   * abort.
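+   * The errorMessage is written to the master's log and surfaced in its UI;
+   * the paired response message is intentionally empty.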
+ */ + rpc reportRSFatalError(ReportRSFatalErrorRequest) + returns(ReportRSFatalErrorResponse); +} diff --git a/src/main/protobuf/hbase.proto b/src/main/protobuf/hbase.proto index 12e6053fc68..30a4c3f856e 100644 --- a/src/main/protobuf/hbase.proto +++ b/src/main/protobuf/hbase.proto @@ -54,6 +54,85 @@ message RegionSpecifier { } } +message RegionLoad { + /** the region specifier */ + required RegionSpecifier regionSpecifier = 1; + + /** the number of stores for the region */ + optional uint32 stores = 2; + + /** the number of storefiles for the region */ + optional uint32 storefiles = 3; + + /** the total size of the store files for the region, uncompressed, in MB */ + optional uint32 storeUncompressedSizeMB = 4; + + /** the current total size of the store files for the region, in MB */ + optional uint32 storefileSizeMB = 5; + + /** the current size of the memstore for the region, in MB */ + optional uint32 memstoreSizeMB = 6; + + /** + * The current total size of root-level store file indexes for the region, + * in MB. The same value as rootIndexSizeKB, but expressed in MB. + */ + optional uint32 storefileIndexSizeMB = 7; + + /** the current total read requests made to the region */ + optional uint64 readRequestsCount = 8; + + /** the current total write requests made to the region */ + optional uint64 writeRequestsCount = 9; + + /** the total number of key values to compact in the currently running compaction */ + optional uint64 totalCompactingKVs = 10; + + /** the number of key values already compacted in the currently running compaction */ + optional uint64 currentCompactedKVs = 11; + + /** The current total size of root-level indexes for the region, in KB. */ + optional uint32 rootIndexSizeKB = 12; + + /** The total size of all index blocks, not just the root level, in KB. */ + optional uint32 totalStaticIndexSizeKB = 13; + + /** + * The total size of all Bloom filter blocks, not just those loaded into the + * block cache, in KB. + */ + optional uint32 totalStaticBloomSizeKB = 14; + + /** Region-level coprocessors. */ + repeated Coprocessor coprocessors = 15; +} + +/* Server-level protobufs */ + +message ServerLoad { + /** Number of requests per second since the last report. */ + optional uint32 requestsPerSecond = 1; + + /** Total number of requests since the region server started. */ + optional uint32 totalNumberOfRequests = 2; + + /** The amount of used heap, in MB. */ + optional uint32 usedHeapMB = 3; + + /** The maximum allowable size of the heap, in MB. */ + optional uint32 maxHeapMB = 4; + + /** Information on the load of individual regions. */ + repeated RegionLoad regionLoads = 5; + + /** + * Regionserver-level coprocessors, e.g., WALObserver implementations. + * Region-level coprocessors, on the other hand, are stored inside RegionLoad + * objects. + */ + repeated Coprocessor coprocessors = 6; +} +
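[editor note, not part of the patch] Everything in the new ServerLoad message is optional, so producers set only what they know and consumers should guard their reads. A minimal sketch against the generated HBaseProtos classes; the sample values and the class name are hypothetical:

  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

  public class ServerLoadSketch {  // hypothetical example class
    public static void main(String[] args) {
      HBaseProtos.ServerLoad sl = HBaseProtos.ServerLoad.newBuilder()
          .setRequestsPerSecond(100)         // hypothetical sample values
          .setTotalNumberOfRequests(360000)
          .setUsedHeapMB(512)
          .setMaxHeapMB(1024)
          .build();
      // Optional fields get has*() accessors from the protobuf compiler:
      int usedHeap = sl.hasUsedHeapMB() ? sl.getUsedHeapMB() : 0;
      // regionLoads is repeated, so there is a count and a list instead:
      System.out.println(usedHeap + "MB heap, "
          + sl.getRegionLoadsCount() + " regions");
    }
  }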
/** * A range of time. Both from and to are Java time * stamp in milliseconds. If you don't specify a time @@ -104,6 +183,10 @@ message ServerName { // Comment data structures +message Coprocessor { + required string name = 1; +} + message NameStringPair { required string name = 1; required string value = 2; diff --git a/src/main/resources/hbase-webapps/master/table.jsp b/src/main/resources/hbase-webapps/master/table.jsp index 3ef1190e9d2..ca7310c0d07 100644 --- a/src/main/resources/hbase-webapps/master/table.jsp +++ b/src/main/resources/hbase-webapps/master/table.jsp @@ -29,12 +29,14 @@ import="org.apache.hadoop.hbase.HServerAddress" import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.HServerInfo" - import="org.apache.hadoop.hbase.HServerLoad" - import="org.apache.hadoop.hbase.HServerLoad.RegionLoad" + import="org.apache.hadoop.hbase.ServerLoad" + import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad" import="org.apache.hadoop.hbase.io.ImmutableBytesWritable" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.util.FSUtils" + import="org.apache.hadoop.hbase.protobuf.ProtobufUtil" + import="java.util.List" import="java.util.Map" import="org.apache.hadoop.hbase.HConstants"%><% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); @@ -176,11 +178,15 @@ String urlRegionServer = null; if (addr != null) { - HServerLoad sl = master.getServerManager().getLoad(addr); + ServerLoad sl = master.getServerManager().getLoad(addr); if (sl != null) { - Map<byte [], RegionLoad> map = sl.getRegionsLoad(); - if (map.containsKey(regionInfo.getRegionName())) { - req = map.get(regionInfo.getRegionName()).getRequestsCount(); + List<RegionLoad> list = sl.getRegionLoadsList(); + byte [] regionName = regionInfo.getRegionName(); + for (RegionLoad rgLoad : list) { + if (Bytes.equals(rgLoad.getRegionSpecifier().getValue().toByteArray(), regionName)) { + req = ProtobufUtil.getTotalRequestsCount(rgLoad); + break; + } } // This port might be wrong if RS actually ended up using something else. urlRegionServer = diff --git a/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 72554cb0e56..c7442ae57a2 100644 --- a/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.User; @@ -112,7 +113,8 @@ public class MiniHBaseCluster { */ @Override - protected void handleReportForDutyResponse(MapWritable c) throws IOException { + protected void handleReportForDutyResponse( + final RegionServerStartupResponse c) throws IOException { super.handleReportForDutyResponse(c);
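// The PB startup response replaces the old MapWritable: it carries the // master's config overrides and the hostname the master saw for this // server, as repeated NameStringPair entries.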
// Run this thread to shutdown our filesystem on way out. this.shutdownThread = new SingleFileSystemShutdownThread(getFileSystem()); diff --git a/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index d039be359c0..d69f5892e48 100644 --- a/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -26,14 +26,16 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad; import javax.tools.*; import java.io.*; import java.util.*; -import java.util.Arrays; import java.util.jar.*; import org.junit.*; @@ -550,22 +552,21 @@ public class TestClassLoading { /** * return the subset of all regionservers - * (actually returns set of HServerLoads) + * (actually returns set of ServerLoads) * which host some region in a given table. * used by assertAllRegionServers() below to * test reporting of loaded coprocessors. * @param tableName : given table. * @return subset of all servers. */ - Map<ServerName, HServerLoad> serversForTable(String tableName) { - Map<ServerName, HServerLoad> serverLoadHashMap = - new HashMap<ServerName, HServerLoad>(); - for(Map.Entry<ServerName, HServerLoad> server: + Map<ServerName, ServerLoad> serversForTable(String tableName) { + Map<ServerName, ServerLoad> serverLoadHashMap = + new HashMap<ServerName, ServerLoad>(); + for(Map.Entry<ServerName, ServerLoad> server: TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager(). getOnlineServers().entrySet()) { - for(Map.Entry<byte[], HServerLoad.RegionLoad> region: - server.getValue().getRegionsLoad().entrySet()) { - if (region.getValue().getNameAsString().equals(tableName)) { + for (RegionLoad region : server.getValue().getRegionLoadsList()) { + if (Bytes.toString(region.getRegionSpecifier().getValue().toByteArray()).equals(tableName)) { // this server hosts a region of tableName: add this server.. serverLoadHashMap.put(server.getKey(),server.getValue()); // .. and skip the rest of the regions that it hosts.
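[editor note, not part of the patch] The same region-matching pattern recurs wherever HServerLoad.getRegionsLoad() used to be keyed by region name: with the PB ServerLoad you walk the repeated RegionLoad field and compare raw bytes, and byte[] must be compared with Bytes.equals() rather than Object.equals() (the bug fixed in the table.jsp hunk above). A minimal sketch, assuming the ServerLoad wrapper added by this patch; the helper class and method are hypothetical:

  import org.apache.hadoop.hbase.ServerLoad;
  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RegionMatchSketch {  // hypothetical helper
    /** Returns true if the given server load carries the named region. */
    static boolean hostsRegion(ServerLoad sl, byte[] regionName) {
      for (RegionLoad rl : sl.getRegionLoadsList()) {
        // RegionSpecifier.value holds the region name bytes here.
        if (Bytes.equals(rl.getRegionSpecifier().getValue().toByteArray(), regionName)) {
          return true;
        }
      }
      return false;
    }
  }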
@@ -578,7 +579,7 @@ public class TestClassLoading { void assertAllRegionServers(String[] expectedCoprocessors, String tableName) throws InterruptedException { - Map<ServerName, HServerLoad> servers; + Map<ServerName, ServerLoad> servers; String[] actualCoprocessors = null; boolean success = false; for(int i = 0; i < 5; i++) { @@ -591,8 +592,9 @@ public class TestClassLoading { servers = serversForTable(tableName); } boolean any_failed = false; - for(Map.Entry<ServerName, HServerLoad> server: servers.entrySet()) { - actualCoprocessors = server.getValue().getCoprocessors(); + for(Map.Entry<ServerName, ServerLoad> server: servers.entrySet()) { + actualCoprocessors = + ServerLoad.getAllCoprocessors(server.getValue()); if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) { LOG.debug("failed comparison: actual: " + Arrays.toString(actualCoprocessors) + diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java b/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java index d07a763e249..b17ed1fee2c 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerLoad; +import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.RegionTransition; import org.apache.hadoop.hbase.Server; @@ -128,9 +128,9 @@ public class TestAssignmentManager { this.serverManager = Mockito.mock(ServerManager.class); Mockito.when(this.serverManager.isServerOnline(SERVERNAME_A)).thenReturn(true); Mockito.when(this.serverManager.isServerOnline(SERVERNAME_B)).thenReturn(true); - final Map<ServerName, HServerLoad> onlineServers = new HashMap<ServerName, HServerLoad>(); - onlineServers.put(SERVERNAME_B, new HServerLoad()); - onlineServers.put(SERVERNAME_A, new HServerLoad()); + final Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>(); + onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD); + onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD); Mockito.when(this.serverManager.getOnlineServersList()).thenReturn( new ArrayList<ServerName>(onlineServers.keySet())); Mockito.when(this.serverManager.getOnlineServers()).thenReturn(onlineServers); diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestMXBean.java b/src/test/java/org/apache/hadoop/hbase/master/TestMXBean.java index bd5fa90ca70..cdcd4fdb700 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestMXBean.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestMXBean.java @@ -24,7 +24,7 @@ import java.util.Set; import junit.framework.Assert; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HServerLoad; +import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.MediumTests; import org.junit.AfterClass; @@ -48,7 +48,7 @@ public class TestMXBean { TEST_UTIL.shutdownMiniCluster(); } - private void verifyRegionServers(Map<String, HServerLoad> regions) { + private void verifyRegionServers(Map<String, ServerLoad> regions) { Set<String> expected = new HashSet<String>(); for (int i = 0; i < 4; ++i) { HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i); } int found = 0; - for (java.util.Map.Entry<String, HServerLoad> entry : regions.entrySet()) { + for (java.util.Map.Entry<String, ServerLoad> entry : regions.entrySet()) { if (expected.contains(entry.getKey())) { ++found; } diff --git
a/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index d16d156a587..7b9804986c5 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerLoad; +import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; @@ -46,9 +47,13 @@ import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.io.MapWritable; -import org.apache.hadoop.io.Text; import org.apache.zookeeper.KeeperException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; +import com.google.protobuf.ServiceException; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -127,7 +132,7 @@ public class TestMasterNoCluster { */ @Test public void testFailover() - throws IOException, KeeperException, InterruptedException { + throws IOException, KeeperException, InterruptedException, ServiceException { final long now = System.currentTimeMillis(); // Names for our three servers. Make the port numbers match the hostnames. // They will come into use down in the server when we need to figure out how to respond. @@ -209,7 +214,11 @@ public class TestMasterNoCluster { while (!master.isRpcServerOpen()) Threads.sleep(10); // Fake it so the master thinks there are regionservers out there. Report in. for (int i = 0; i < sns.length; i++) { - master.regionServerReport(sns[i].getVersionedBytes(), new HServerLoad()); + RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder(); + ServerName sn = ServerName.parseVersionedServerName(sns[i].getVersionedBytes()); + request.setServer(ProtobufUtil.toServerName(sn)); + request.setLoad(ServerLoad.EMPTY_SERVERLOAD.getServerLoadPB()); + master.regionServerReport(null, request.build()); } // Master should now come up. while (!master.isInitialized()) {Threads.sleep(10);} @@ -229,10 +238,11 @@ public class TestMasterNoCluster { * @throws KeeperException * @throws InterruptedException * @throws DeserializationException + * @throws ServiceException */ @Test public void testCatalogDeploys() - throws IOException, KeeperException, InterruptedException, DeserializationException { + throws IOException, KeeperException, InterruptedException, DeserializationException, ServiceException { final Configuration conf = TESTUTIL.getConfiguration(); final long now = System.currentTimeMillis(); // Name for our single mocked up regionserver. @@ -286,11 +296,19 @@ public class TestMasterNoCluster { // Wait till master is up ready for RPCs.
while (!master.isRpcServerOpen()) Threads.sleep(10); // Fake it so the master thinks there is a regionserver out there. Report in. - MapWritable mw = master.regionServerStartup(rs0.getServerName().getPort(), - rs0.getServerName().getStartcode(), now); + RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); + request.setPort(rs0.getServerName().getPort()); + request.setServerStartCode(rs0.getServerName().getStartcode()); + request.setServerCurrentTime(now); + RegionServerStartupResponse result = + master.regionServerStartup(null, request.build()); + String rshostname = ""; + for (NameStringPair e : result.getMapEntriesList()) { + if (e.getName().equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) { + rshostname = e.getValue(); + } + } // Assert hostname is as expected. - String rshostname = - mw.get(new Text(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)).toString(); assertEquals(rs0.getServerName().getHostname(), rshostname); // Now master knows there is at least one regionserver checked in and so // it'll wait a while to see if more and when none, will assign root and diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java index e99d2514a4d..e41fca37748 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; import org.apache.hadoop.hbase.ipc.HMasterInterface; -import org.apache.hadoop.hbase.ipc.HMasterRegionInterface; import org.apache.hadoop.hbase.ipc.ProtocolSignature; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil;
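[editor note, not part of the patch] End to end, the new startup handshake replaces regionServerStartup(port, startcode, now) over HMasterRegionInterface with a PB request/response pair. A minimal sketch of the region-server side, assuming a RegionServerStatusProtocol proxy already obtained through HBaseRPC.waitForProxy() as in HRegionServer.getMaster() above; the class and method names here are hypothetical:

  import java.io.IOException;
  import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol;
  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
  import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
  import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
  import com.google.protobuf.ServiceException;

  public class StartupHandshakeSketch {  // hypothetical example class
    static RegionServerStartupResponse checkIn(RegionServerStatusProtocol master,
        int port, long startcode) throws IOException {
      RegionServerStartupRequest req = RegionServerStartupRequest.newBuilder()
          .setPort(port)
          .setServerStartCode(startcode)
          .setServerCurrentTime(System.currentTimeMillis())
          .build();
      try {
        // PB stubs throw ServiceException; unwrap to the old IOException style.
        RegionServerStartupResponse resp = master.regionServerStartup(null, req);
        for (NameStringPair e : resp.getMapEntriesList()) {
          // Config overrides and the master-seen hostname come back here.
          System.out.println(e.getName() + "=" + e.getValue());
        }
        return resp;
      } catch (ServiceException se) {
        throw ProtobufUtil.getRemoteException(se);
      }
    }
  }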