diff --git a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index d6e673aabfe..fad4b2beaba 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -76,7 +76,10 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; @@ -1431,7 +1434,20 @@ public class HBaseAdmin implements Abortable, Closeable { throws MasterNotRunningException, ZooKeeperConnectionException { MasterKeepAliveConnection master = connection.getKeepAliveMaster(); try { - return master.balanceSwitch(b); + SetBalancerRunningRequest req = RequestConverter.buildLoadBalancerIsRequest(b, false); + return master.loadBalancerIs(null, req).getPrevBalanceValue(); + } catch (ServiceException se) { + IOException ioe = ProtobufUtil.getRemoteException(se); + if (ioe instanceof MasterNotRunningException) { + throw (MasterNotRunningException)ioe; + } + if (ioe instanceof ZooKeeperConnectionException) { + throw (ZooKeeperConnectionException)ioe; + } + + // Throw MasterNotRunningException even though it is not strictly accurate here, so we + // do not break the interface by adding an additional exception type. + throw new MasterNotRunningException("Unexpected exception when calling balanceSwitch", se); } finally { master.close(); } @@ -1444,10 +1460,10 @@ public class HBaseAdmin implements Abortable, Closeable { * @return True if balancer ran, false otherwise.
*/ public boolean balancer() - throws MasterNotRunningException, ZooKeeperConnectionException { + throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException { MasterKeepAliveConnection master = connection.getKeepAliveMaster(); try { - return master.balance(); + return master.balance(null,RequestConverter.buildBalanceRequest()).getBalancerRan(); } finally { master.close(); } @@ -1599,8 +1615,8 @@ public class HBaseAdmin implements Abortable, Closeable { public synchronized void shutdown() throws IOException { execute(new MasterCallable() { @Override - public Void call() throws IOException { - master.shutdown(); + public Void call() throws ServiceException { + master.shutdown(null,ShutdownRequest.newBuilder().build()); return null; } }); @@ -1615,8 +1631,8 @@ public class HBaseAdmin implements Abortable, Closeable { public synchronized void stopMaster() throws IOException { execute(new MasterCallable() { @Override - public Void call() throws IOException { - master.stopMaster(); + public Void call() throws ServiceException { + master.stopMaster(null,StopMasterRequest.newBuilder().build()); return null; } }); @@ -1678,7 +1694,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws ZooKeeperConnectionException if unable to connect to zookeeper */ public static void checkHBaseAvailable(Configuration conf) - throws MasterNotRunningException, ZooKeeperConnectionException { + throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException { Configuration copyOfConf = HBaseConfiguration.create(conf); // We set it to make it fail as soon as possible if HBase is not available @@ -1716,7 +1732,7 @@ public class HBaseAdmin implements Abortable, Closeable { MasterKeepAliveConnection master = null; try { master = connection.getKeepAliveMaster(); - master.isMasterRunning(); + master.isMasterRunning(null,RequestConverter.buildIsMasterRunningRequest()); } finally { if (master != null) { master.close(); diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index bb3a3b71a1e..a72e72ddd98 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -78,6 +78,8 @@ import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.ipc.HMasterInterface; import org.apache.hadoop.hbase.ipc.VersionedProtocol; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; @@ -672,7 +674,7 @@ public class HConnectionManager { * Create a new Master proxy. Try once only. 
*/ private HMasterInterface createMasterInterface() - throws IOException, KeeperException { + throws IOException, KeeperException, ServiceException { ZooKeeperKeepAliveConnection zkw; try { @@ -699,7 +701,8 @@ public class HConnectionManager { HMasterInterface.class, HMasterInterface.VERSION, isa, this.conf, this.rpcTimeout); - if (tryMaster.isMasterRunning()) { + if (tryMaster.isMasterRunning( + null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning()) { return tryMaster; } else { HBaseRPC.stopProxy(tryMaster); @@ -760,6 +763,8 @@ public class HConnectionManager { exceptionCaught = e; } catch (KeeperException e) { exceptionCaught = e; + } catch (ServiceException e) { + exceptionCaught = e; } if (exceptionCaught != null) @@ -1640,7 +1645,8 @@ public class HConnectionManager { return false; } try { - return keepAliveMaster.isMasterRunning(); + return keepAliveMaster.isMasterRunning( + null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning(); }catch (UndeclaredThrowableException e){ // It's somehow messy, but we can receive exceptions such as // java.net.ConnectException but they're not declared. So we catch @@ -1648,6 +1654,9 @@ public class HConnectionManager { LOG.info("Master connection is not running anymore", e.getUndeclaredThrowable()); return false; + } catch (ServiceException se) { + LOG.warn("Checking master connection", se); + return false; } } diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java b/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java index 80c2165dc82..da7451d4082 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java @@ -31,8 +31,19 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionReque import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse; +import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.security.TokenInfo; import org.apache.hadoop.hbase.security.KerberosInfo; import org.apache.hadoop.hbase.util.Pair; @@ -67,8 +78,15 @@ public interface HMasterInterface extends VersionedProtocol { // 31: 5/8/2012 - HBASE-5445: Converted to PB-based calls public static final long VERSION = 31L; - /** @return true if master is available */ - public boolean 
isMasterRunning(); + /** + * @param c Unused (set to null). + * @param req IsMasterRunningRequest + * @return IsMasterRunningResponse that contains:
+ * isMasterRunning: true if master is available + * @throws ServiceException + */ + public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) + throws ServiceException; // Admin tools would use these cmds @@ -158,16 +176,24 @@ public interface HMasterInterface extends VersionedProtocol { /** * Shutdown an HBase cluster. - * @throws IOException e + * @param controller Unused (set to null). + * @param request ShutdownRequest + * @return ShutdownResponse + * @throws ServiceException */ - public void shutdown() throws IOException; + public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request) + throws ServiceException; /** * Stop HBase Master only. * Does not shutdown the cluster. - * @throws IOException e + * @param controller Unused (set to null). + * @param request StopMasterRequest + * @return StopMasterResponse + * @throws ServiceException */ - public void stopMaster() throws IOException; + public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request) + throws ServiceException; /** * Return cluster status. @@ -190,26 +216,27 @@ public interface HMasterInterface extends VersionedProtocol { * Run the balancer. Will run the balancer and if regions to move, it will * go ahead and do the reassignments. Can NOT run for various reasons. Check * logs. - * @return True if balancer ran and was able to tell the region servers to + * @param c Unused (set to null). + * @param request BalanceRequest + * @return BalanceResponse that contains:
+ * - balancerRan: True if balancer ran and was able to tell the region servers to * unassign all the regions to balance (the re-assignment itself is async), * false otherwise. */ - public boolean balance(); + public BalanceResponse balance(RpcController c, BalanceRequest request) throws ServiceException; /** * Turn the load balancer on or off. - * @param b If true, enable balancer. If false, disable balancer. - * @return Previous balancer value + * @param controller Unused (set to null). + * @param req SetBalancerRunningRequest that contains:
+ * - on: If true, enable balancer. If false, disable balancer.
+ * - synchronous: if true, wait for any outstanding balance() call to return before flipping the switch. + * @return SetBalancerRunningResponse that contains:
+ * - prevBalanceValue: Previous balancer value + * @throws ServiceException */ - public boolean balanceSwitch(final boolean b); - - /** - * Turn the load balancer on or off. - * It waits until current balance() call, if outstanding, to return. - * @param b If true, enable balancer. If false, disable balancer. - * @return Previous balancer value - */ - public boolean synchronousBalanceSwitch(final boolean b); + public SetBalancerRunningResponse loadBalancerIs(RpcController controller, SetBalancerRunningRequest req) + throws ServiceException; /** * Get array of all HTDs. @@ -240,9 +267,9 @@ public interface HMasterInterface extends VersionedProtocol { * back to the same server. Use {@link #moveRegion(RpcController,MoveRegionRequest} * if you want to control the region movement. * @param controller Unused (set to null). - * @param req The request which contains: + * @param req The request which contains:
* - region: Region to unassign. Will clear any existing RegionPlan - * if one found. + * if one found.
* - force: If true, force unassign (Will remove region from * regions-in-transition too if present as well as from assigned regions -- * radical!.If results in double assignment use hbck -fix to resolve. @@ -254,11 +281,11 @@ public interface HMasterInterface extends VersionedProtocol { /** * Move a region to a specified destination server. * @param controller Unused (set to null). - * @param req The request which contains: + * @param req The request which contains:
* - region: The encoded region name; i.e. the hash that makes * up the region name suffix: e.g. if regionname is * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. + * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396.
* - destServerName: The servername of the destination regionserver. If * passed the empty byte array we'll assign to a random server. A server name * is made of host, port and startcode. Here is an example: diff --git a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 778e80b51c3..15ecd274147 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.ipc.HBaseServer; import org.apache.hadoop.hbase.ipc.HMasterInterface; import org.apache.hadoop.hbase.ipc.RegionServerStatusProtocol; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.ipc.ProtocolSignature; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.handler.CreateTableHandler; @@ -134,6 +135,16 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Repor import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse; import com.google.protobuf.ServiceException; /** @@ -1062,6 +1073,11 @@ Server { return !isStopped(); } + public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) + throws ServiceException { + return IsMasterRunningResponse.newBuilder().setIsMasterRunning(isMasterRunning()).build(); + } + /** * @return Maximum time we should run balancer for */ @@ -1079,7 +1095,6 @@ Server { return balancerCutoffTime; } - @Override public boolean balance() { // If balance not true, don't run balancer. 
if (!this.balanceSwitch) return false; @@ -1154,6 +1169,11 @@ Server { return balancerRan; } + @Override + public BalanceResponse balance(RpcController c, BalanceRequest request) throws ServiceException { + return BalanceResponse.newBuilder().setBalancerRan(balance()).build(); + } + enum BalanceSwitchMode { SYNC, ASYNC @@ -1185,19 +1205,25 @@ Server { } catch (IOException ioe) { LOG.warn("Error flipping balance switch", ioe); } - return oldValue; + return oldValue; } - - @Override + public boolean synchronousBalanceSwitch(final boolean b) { return switchBalancer(b, BalanceSwitchMode.SYNC); } - - @Override + public boolean balanceSwitch(final boolean b) { return switchBalancer(b, BalanceSwitchMode.ASYNC); } + @Override + public SetBalancerRunningResponse loadBalancerIs(RpcController controller, SetBalancerRunningRequest req) + throws ServiceException { + boolean prevValue = (req.getSynchronous())? + synchronousBalanceSwitch(req.getOn()):balanceSwitch(req.getOn()); + return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build(); + } + /** * Switch for the background CatalogJanitor thread. * Used for testing. The thread will continue to run. It will just be a noop @@ -1705,7 +1731,6 @@ Server { } @SuppressWarnings("deprecation") - @Override public void shutdown() { if (cpHost != null) { try { @@ -1730,6 +1755,12 @@ Server { } @Override + public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request) + throws ServiceException { + shutdown(); + return ShutdownResponse.newBuilder().build(); + } + public void stopMaster() { if (cpHost != null) { try { @@ -1741,6 +1772,13 @@ Server { stop("Stopped by " + Thread.currentThread().getName()); } + @Override + public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request) + throws ServiceException { + stopMaster(); + return StopMasterResponse.newBuilder().build(); + } + @Override public void stop(final String why) { LOG.info(why); diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 90cb53d1b97..2920a719f03 100644 --- a/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -81,6 +81,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Re import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -884,4 +887,33 @@ public final class RequestConverter { builder.setForce(force); return builder.build(); } + + /** + * Creates a protocol buffer IsMasterRunningRequest + * + * @return a IsMasterRunningRequest + */ + public static IsMasterRunningRequest buildIsMasterRunningRequest() { + return IsMasterRunningRequest.newBuilder().build(); + } + + /** + * Creates a protocol buffer BalanceRequest + * + * @return a BalanceRequest + */ + public static 
BalanceRequest buildBalanceRequest() { + return BalanceRequest.newBuilder().build(); + } + + /** + * Creates a protocol buffer SetBalancerRunningRequest + * + * @param on + * @param synchronous + * @return a SetBalancerRunningRequest + */ + public static SetBalancerRunningRequest buildLoadBalancerIsRequest(boolean on, boolean synchronous) { + return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build(); + } } diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 944e403d7b9..213b6a5e81e 100644 --- a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -2557,6 +2557,3380 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:UnassignRegionResponse) } + public interface IsMasterRunningRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class IsMasterRunningRequest extends + com.google.protobuf.GeneratedMessage + implements IsMasterRunningRequestOrBuilder { + // Use IsMasterRunningRequest.newBuilder() to construct. + private IsMasterRunningRequest(Builder builder) { + super(builder); + } + private IsMasterRunningRequest(boolean noInit) {} + + private static final IsMasterRunningRequest defaultInstance; + public static IsMasterRunningRequest getDefaultInstance() { + return defaultInstance; + } + + public IsMasterRunningRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsMasterRunningRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsMasterRunningRequest_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int 
hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected 
Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsMasterRunningRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsMasterRunningRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final 
boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:IsMasterRunningRequest) + } + + static { + defaultInstance = new IsMasterRunningRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:IsMasterRunningRequest) + } + + public interface IsMasterRunningResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool isMasterRunning = 1; + boolean hasIsMasterRunning(); + boolean getIsMasterRunning(); + } + public static final class IsMasterRunningResponse extends + com.google.protobuf.GeneratedMessage + implements IsMasterRunningResponseOrBuilder { + // Use IsMasterRunningResponse.newBuilder() to construct. + private IsMasterRunningResponse(Builder builder) { + super(builder); + } + private IsMasterRunningResponse(boolean noInit) {} + + private static final IsMasterRunningResponse defaultInstance; + public static IsMasterRunningResponse getDefaultInstance() { + return defaultInstance; + } + + public IsMasterRunningResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsMasterRunningResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsMasterRunningResponse_fieldAccessorTable; + } + + private int bitField0_; + // required bool isMasterRunning = 1; + public static final int ISMASTERRUNNING_FIELD_NUMBER = 1; + private boolean isMasterRunning_; + public boolean hasIsMasterRunning() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getIsMasterRunning() { + return isMasterRunning_; + } + + private void initFields() { + isMasterRunning_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasIsMasterRunning()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, isMasterRunning_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, isMasterRunning_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = 
size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) obj; + + boolean result = true; + result = result && (hasIsMasterRunning() == other.hasIsMasterRunning()); + if (hasIsMasterRunning()) { + result = result && (getIsMasterRunning() + == other.getIsMasterRunning()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasIsMasterRunning()) { + hash = (37 * hash) + ISMASTERRUNNING_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsMasterRunning()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsMasterRunningResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsMasterRunningResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + isMasterRunning_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.isMasterRunning_ = isMasterRunning_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()) return this; + if (other.hasIsMasterRunning()) { + setIsMasterRunning(other.getIsMasterRunning()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasIsMasterRunning()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + isMasterRunning_ = input.readBool(); + break; + } + } + } + } + + private int bitField0_; + + // required bool isMasterRunning = 1; + private boolean isMasterRunning_ ; + public boolean hasIsMasterRunning() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getIsMasterRunning() { + return isMasterRunning_; + } + public Builder setIsMasterRunning(boolean value) { + bitField0_ |= 0x00000001; + isMasterRunning_ = value; + onChanged(); + return this; + } + public Builder clearIsMasterRunning() { + bitField0_ = (bitField0_ & ~0x00000001); + isMasterRunning_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:IsMasterRunningResponse) + } + + static { + defaultInstance = new IsMasterRunningResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:IsMasterRunningResponse) + } + + public interface ShutdownRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class ShutdownRequest extends + com.google.protobuf.GeneratedMessage + implements ShutdownRequestOrBuilder { + // Use ShutdownRequest.newBuilder() to construct. 
+ private ShutdownRequest(Builder builder) { + super(builder); + } + private ShutdownRequest(boolean noInit) {} + + private static final ShutdownRequest defaultInstance; + public static ShutdownRequest getDefaultInstance() { + return defaultInstance; + } + + public ShutdownRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ShutdownRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ShutdownRequest_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public 
static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ShutdownRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ShutdownRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:ShutdownRequest) + } + + static { + defaultInstance = new ShutdownRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ShutdownRequest) + } + + public interface ShutdownResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class ShutdownResponse extends + com.google.protobuf.GeneratedMessage + implements ShutdownResponseOrBuilder { + // Use ShutdownResponse.newBuilder() to construct. 
+ private ShutdownResponse(Builder builder) { + super(builder); + } + private ShutdownResponse(boolean noInit) {} + + private static final ShutdownResponse defaultInstance; + public static ShutdownResponse getDefaultInstance() { + return defaultInstance; + } + + public ShutdownResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ShutdownResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ShutdownResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); 
+ } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ShutdownResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ShutdownResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + 
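+      // Illustrative sketch (not protoc output): ShutdownResponse declares
+      // no fields, so its Builder only carries unknown fields; the lifecycle
+      // is the same one every generated message in this file shares:
+      //
+      //   ShutdownResponse msg = ShutdownResponse.newBuilder().build();
+      //   ShutdownResponse.Builder edit = msg.toBuilder(); // copy back into a builder
+      //   ShutdownResponse again = edit.build();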
public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:ShutdownResponse) + } + + static { + defaultInstance = new ShutdownResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ShutdownResponse) + } + + public interface StopMasterRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class StopMasterRequest extends + com.google.protobuf.GeneratedMessage + implements StopMasterRequestOrBuilder { + // Use StopMasterRequest.newBuilder() to construct. 
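+    // Illustrative sketch (not protoc output): ShutdownResponse above and
+    // the StopMaster/Balance messages below are (near-)empty envelopes; the
+    // RPC method name carries the meaning, so building a request is one line:
+    //
+    //   StopMasterRequest req = StopMasterRequest.newBuilder().build();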
+ private StopMasterRequest(Builder builder) { + super(builder); + } + private StopMasterRequest(boolean noInit) {} + + private static final StopMasterRequest defaultInstance; + public static StopMasterRequest getDefaultInstance() { + return defaultInstance; + } + + public StopMasterRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_StopMasterRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_StopMasterRequest_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + 
.buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_StopMasterRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_StopMasterRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:StopMasterRequest) + } + + static { + defaultInstance = new StopMasterRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StopMasterRequest) + } + + public interface StopMasterResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class StopMasterResponse extends + com.google.protobuf.GeneratedMessage + implements StopMasterResponseOrBuilder { + // Use StopMasterResponse.newBuilder() to construct. 
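+    // Illustrative sketch (not protoc output): the parseDelimitedFrom
+    // methods above return null on clean end-of-stream instead of throwing,
+    // which allows a read loop over length-prefixed messages:
+    //
+    //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
+    //   StopMasterRequest.newBuilder().build().writeDelimitedTo(out);
+    //   java.io.InputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
+    //   StopMasterRequest first = StopMasterRequest.parseDelimitedFrom(in);  // the message
+    //   StopMasterRequest second = StopMasterRequest.parseDelimitedFrom(in); // null: EOF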
+ private StopMasterResponse(Builder builder) { + super(builder); + } + private StopMasterResponse(boolean noInit) {} + + private static final StopMasterResponse defaultInstance; + public static StopMasterResponse getDefaultInstance() { + return defaultInstance; + } + + public StopMasterResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_StopMasterResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_StopMasterResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, 
extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_StopMasterResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_StopMasterResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder 
clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:StopMasterResponse) + } + + static { + defaultInstance = new StopMasterResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StopMasterResponse) + } + + public interface BalanceRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class BalanceRequest extends + com.google.protobuf.GeneratedMessage + implements BalanceRequestOrBuilder { + // Use BalanceRequest.newBuilder() to construct. 
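+    // Illustrative sketch (not protoc output): the generated
+    // equals()/hashCode() above compare declared fields plus unknown fields,
+    // so two independent parses of the same bytes compare equal:
+    //
+    //   byte[] wire = StopMasterResponse.newBuilder().build().toByteArray();
+    //   assert StopMasterResponse.parseFrom(wire)
+    //       .equals(StopMasterResponse.parseFrom(wire));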
+ private BalanceRequest(Builder builder) { + super(builder); + } + private BalanceRequest(boolean noInit) {} + + private static final BalanceRequest defaultInstance; + public static BalanceRequest getDefaultInstance() { + return defaultInstance; + } + + public BalanceRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceRequest_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDescriptor();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+          }
+        }
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:BalanceRequest)
+    }
+
+    static {
+      defaultInstance = new BalanceRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:BalanceRequest)
+  }
+
+  public interface BalanceResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool balancerRan = 1;
+    boolean hasBalancerRan();
+    boolean getBalancerRan();
+  }
+  public static final class BalanceResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements BalanceResponseOrBuilder {
+    // Use BalanceResponse.newBuilder() to construct.
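+    // Illustrative sketch (not protoc output): balancerRan is a *required*
+    // field, so build() refuses a BalanceResponse that never set it:
+    //
+    //   BalanceResponse ok = BalanceResponse.newBuilder().setBalancerRan(true).build();
+    //   boolean ran = ok.getBalancerRan();    // true
+    //   BalanceResponse.newBuilder().build(); // throws
+    //       // UninitializedMessageException: hasBalancerRan() is false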
+ private BalanceResponse(Builder builder) { + super(builder); + } + private BalanceResponse(boolean noInit) {} + + private static final BalanceResponse defaultInstance; + public static BalanceResponse getDefaultInstance() { + return defaultInstance; + } + + public BalanceResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceResponse_fieldAccessorTable; + } + + private int bitField0_; + // required bool balancerRan = 1; + public static final int BALANCERRAN_FIELD_NUMBER = 1; + private boolean balancerRan_; + public boolean hasBalancerRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getBalancerRan() { + return balancerRan_; + } + + private void initFields() { + balancerRan_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBalancerRan()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, balancerRan_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, balancerRan_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) obj; + + boolean result = true; + result = result && (hasBalancerRan() == other.hasBalancerRan()); + if (hasBalancerRan()) { + result = result && (getBalancerRan() + == other.getBalancerRan()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBalancerRan()) { + hash = (37 * hash) + BALANCERRAN_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBalancerRan()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + balancerRan_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.balancerRan_ = balancerRan_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()) return this; + if (other.hasBalancerRan()) { + setBalancerRan(other.getBalancerRan()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBalancerRan()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              balancerRan_ = input.readBool();
+              break;
+            }
+          }
+        }
+      }
+
+      private int bitField0_;
+
+      // required bool balancerRan = 1;
+      private boolean balancerRan_ ;
+      public boolean hasBalancerRan() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public boolean getBalancerRan() {
+        return balancerRan_;
+      }
+      public Builder setBalancerRan(boolean value) {
+        bitField0_ |= 0x00000001;
+        balancerRan_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearBalancerRan() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        balancerRan_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:BalanceResponse)
+    }
+
+    static {
+      defaultInstance = new BalanceResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:BalanceResponse)
+  }
+
+  public interface SetBalancerRunningRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool on = 1;
+    boolean hasOn();
+    boolean getOn();
+
+    // optional bool synchronous = 2;
+    boolean hasSynchronous();
+    boolean getSynchronous();
+  }
+  public static final class SetBalancerRunningRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements SetBalancerRunningRequestOrBuilder {
+    // Use SetBalancerRunningRequest.newBuilder() to construct.
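+    // Illustrative sketch (not protoc output): `on` is required while
+    // `synchronous` is optional and defaults to false, so hasSynchronous()
+    // lets the master distinguish "explicitly false" from "never set".
+    // Assuming the usual generated setOn/setSynchronous builder methods:
+    //
+    //   SetBalancerRunningRequest req = SetBalancerRunningRequest.newBuilder()
+    //       .setOn(false)          // required: disable the balancer
+    //       .setSynchronous(true)  // optional: presumably wait for any run in flight
+    //       .build();
+    //   assert req.hasSynchronous() && req.getSynchronous();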
+ private SetBalancerRunningRequest(Builder builder) { + super(builder); + } + private SetBalancerRunningRequest(boolean noInit) {} + + private static final SetBalancerRunningRequest defaultInstance; + public static SetBalancerRunningRequest getDefaultInstance() { + return defaultInstance; + } + + public SetBalancerRunningRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetBalancerRunningRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetBalancerRunningRequest_fieldAccessorTable; + } + + private int bitField0_; + // required bool on = 1; + public static final int ON_FIELD_NUMBER = 1; + private boolean on_; + public boolean hasOn() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getOn() { + return on_; + } + + // optional bool synchronous = 2; + public static final int SYNCHRONOUS_FIELD_NUMBER = 2; + private boolean synchronous_; + public boolean hasSynchronous() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public boolean getSynchronous() { + return synchronous_; + } + + private void initFields() { + on_ = false; + synchronous_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasOn()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, on_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, synchronous_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, on_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, synchronous_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest) obj; + + boolean result = true; + result = result && (hasOn() == other.hasOn()); + if (hasOn()) { + result = result && (getOn() + == other.getOn()); + } + result = result && (hasSynchronous() == other.hasSynchronous()); + if (hasSynchronous()) { + result = result && (getSynchronous() + == 
other.getSynchronous()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasOn()) { + hash = (37 * hash) + ON_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getOn()); + } + if (hasSynchronous()) { + hash = (37 * hash) + SYNCHRONOUS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSynchronous()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, 
extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetBalancerRunningRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetBalancerRunningRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + on_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + synchronous_ = false; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) 
== 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.on_ = on_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.synchronous_ = synchronous_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance()) return this; + if (other.hasOn()) { + setOn(other.getOn()); + } + if (other.hasSynchronous()) { + setSynchronous(other.getSynchronous()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasOn()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + on_ = input.readBool(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + synchronous_ = input.readBool(); + break; + } + } + } + } + + private int bitField0_; + + // required bool on = 1; + private boolean on_ ; + public boolean hasOn() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getOn() { + return on_; + } + public Builder setOn(boolean value) { + bitField0_ |= 0x00000001; + on_ = value; + onChanged(); + return this; + } + public Builder clearOn() { + bitField0_ = (bitField0_ & ~0x00000001); + on_ = false; + onChanged(); + return this; + } + + // optional bool synchronous = 2; + private boolean synchronous_ ; + public boolean hasSynchronous() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public boolean getSynchronous() { + return synchronous_; + } + public Builder setSynchronous(boolean value) { + bitField0_ |= 0x00000002; + synchronous_ = value; + onChanged(); + return this; + } + public Builder clearSynchronous() { + bitField0_ = (bitField0_ & ~0x00000002); + synchronous_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:SetBalancerRunningRequest) + } + + static { + defaultInstance = new SetBalancerRunningRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SetBalancerRunningRequest) + } + + public interface SetBalancerRunningResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional bool prevBalanceValue = 1; + boolean hasPrevBalanceValue(); + boolean getPrevBalanceValue(); + } + public static final class SetBalancerRunningResponse extends + com.google.protobuf.GeneratedMessage + implements 
SetBalancerRunningResponseOrBuilder { + // Use SetBalancerRunningResponse.newBuilder() to construct. + private SetBalancerRunningResponse(Builder builder) { + super(builder); + } + private SetBalancerRunningResponse(boolean noInit) {} + + private static final SetBalancerRunningResponse defaultInstance; + public static SetBalancerRunningResponse getDefaultInstance() { + return defaultInstance; + } + + public SetBalancerRunningResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetBalancerRunningResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetBalancerRunningResponse_fieldAccessorTable; + } + + private int bitField0_; + // optional bool prevBalanceValue = 1; + public static final int PREVBALANCEVALUE_FIELD_NUMBER = 1; + private boolean prevBalanceValue_; + public boolean hasPrevBalanceValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getPrevBalanceValue() { + return prevBalanceValue_; + } + + private void initFields() { + prevBalanceValue_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, prevBalanceValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, prevBalanceValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse) obj; + + boolean result = true; + result = result && (hasPrevBalanceValue() == other.hasPrevBalanceValue()); + if (hasPrevBalanceValue()) { + result = result && (getPrevBalanceValue() + == other.getPrevBalanceValue()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPrevBalanceValue()) { + hash = (37 * hash) + PREVBALANCEVALUE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getPrevBalanceValue()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } 
+ + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new 
Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetBalancerRunningResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_SetBalancerRunningResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + prevBalanceValue_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.prevBalanceValue_ = prevBalanceValue_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance()) return this; + if (other.hasPrevBalanceValue()) { + setPrevBalanceValue(other.getPrevBalanceValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + prevBalanceValue_ = input.readBool(); + break; + } + } + } + } + + private int bitField0_; + + // optional bool prevBalanceValue = 1; + private boolean prevBalanceValue_ ; + public boolean hasPrevBalanceValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getPrevBalanceValue() { + return prevBalanceValue_; + } + public Builder setPrevBalanceValue(boolean value) { + bitField0_ |= 0x00000001; + prevBalanceValue_ = value; + onChanged(); + return this; + } + public Builder clearPrevBalanceValue() { + bitField0_ = (bitField0_ & ~0x00000001); + prevBalanceValue_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:SetBalancerRunningResponse) + } + + static { + defaultInstance = new SetBalancerRunningResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SetBalancerRunningResponse) + } + public static abstract class MasterService implements com.google.protobuf.Service { protected MasterService() {} @@ -2577,6 +5951,31 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse> done); + public abstract void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done); + + public abstract void shutdown( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse> done); + + public abstract void stopMaster( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse> done); + + public abstract void balance( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse> done); + + public abstract void setBalancerRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse> done); + } public static com.google.protobuf.Service newReflectiveService( @@ -2606,6 +6005,46 @@ public final class MasterProtos { 
impl.unassignRegion(controller, request, done); } + @java.lang.Override + public void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done) { + impl.isMasterRunning(controller, request, done); + } + + @java.lang.Override + public void shutdown( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse> done) { + impl.shutdown(controller, request, done); + } + + @java.lang.Override + public void stopMaster( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse> done) { + impl.stopMaster(controller, request, done); + } + + @java.lang.Override + public void balance( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse> done) { + impl.balance(controller, request, done); + } + + @java.lang.Override + public void setBalancerRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse> done) { + impl.setBalancerRunning(controller, request, done); + } + }; } @@ -2634,6 +6073,16 @@ public final class MasterProtos { return impl.assignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest)request); case 2: return impl.unassignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest)request); + case 3: + return impl.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request); + case 4: + return impl.shutdown(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest)request); + case 5: + return impl.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest)request); + case 6: + return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request); + case 7: + return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2654,6 +6103,16 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2674,6 +6133,16 @@ public final class MasterProtos { return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2697,6 +6166,31 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request, com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse> done); + public abstract void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done); + + public abstract void shutdown( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse> done); + + public abstract void stopMaster( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse> done); + + public abstract void balance( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse> done); + + public abstract void setBalancerRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse> done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -2734,6 +6228,31 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse>specializeCallback( done)); return; + case 3: + this.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request, + com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse>specializeCallback( + done)); + return; + case 4: + this.shutdown(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest)request, + com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse>specializeCallback( + done)); + return; + case 5: + this.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest)request, + com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse>specializeCallback( + done)); + return; + case 6: + this.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request, + com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse>specializeCallback( + done)); + return; + case 7: + this.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request, + com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse>specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -2754,6 +6273,16 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest.getDefaultInstance(); case 2: 
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2774,6 +6303,16 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2839,6 +6378,81 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance())); } + + public void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done) { + channel.callMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance())); + } + + public void shutdown( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse> done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance())); + } + + public void stopMaster( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse> done) { + channel.callMethod( + getDescriptor().getMethods().get(5), + controller, + request, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance())); + } + + public void balance( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse> done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance())); + } + + public void setBalancerRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse> done) { + channel.callMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -2861,6 +6475,31 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse shutdown( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse stopMaster( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse setBalancerRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -2905,6 +6544,66 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse shutdown( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse stopMaster( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse setBalancerRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance()); + } + } } @@ -2938,6 +6637,56 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UnassignRegionResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_IsMasterRunningRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_IsMasterRunningRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_IsMasterRunningResponse_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_IsMasterRunningResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ShutdownRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ShutdownRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ShutdownResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ShutdownResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StopMasterRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StopMasterRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StopMasterResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StopMasterResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BalanceRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BalanceRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BalanceResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BalanceResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SetBalancerRunningRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SetBalancerRunningRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SetBalancerRunningResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SetBalancerRunningResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -2955,13 +6704,29 @@ public final class MasterProtos { "\n\024AssignRegionResponse\"O\n\025UnassignRegion" + "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" + "r\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRegio" + - "nResponse2\306\001\n\rMasterService\0225\n\nmoveRegio" + - "n\022\022.MoveRegionRequest\032\023.MoveRegionRespon", - "se\022;\n\014assignRegion\022\024.AssignRegionRequest" + - "\032\025.AssignRegionResponse\022A\n\016unassignRegio" + - "n\022\026.UnassignRegionRequest\032\027.UnassignRegi" + - "onResponseBB\n*org.apache.hadoop.hbase.pr" + - "otobuf.generatedB\014MasterProtosH\001\210\001\001\240\001\001" + "nResponse\"\030\n\026IsMasterRunningRequest\"2\n\027I" + + "sMasterRunningResponse\022\027\n\017isMasterRunnin", + "g\030\001 \002(\010\"\021\n\017ShutdownRequest\"\022\n\020ShutdownRe" + + "sponse\"\023\n\021StopMasterRequest\"\024\n\022StopMaste" + + "rResponse\"\020\n\016BalanceRequest\"&\n\017BalanceRe" + + "sponse\022\023\n\013balancerRan\030\001 \002(\010\"<\n\031SetBalanc" + + "erRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchron" + + "ous\030\002 \001(\010\"6\n\032SetBalancerRunningResponse\022" + + "\030\n\020prevBalanceValue\030\001 \001(\0102\361\003\n\rMasterServ" + + "ice\0225\n\nmoveRegion\022\022.MoveRegionRequest\032\023." 
+ + "MoveRegionResponse\022;\n\014assignRegion\022\024.Ass" + + "ignRegionRequest\032\025.AssignRegionResponse\022", + "A\n\016unassignRegion\022\026.UnassignRegionReques" + + "t\032\027.UnassignRegionResponse\022D\n\017isMasterRu" + + "nning\022\027.IsMasterRunningRequest\032\030.IsMaste" + + "rRunningResponse\022/\n\010shutdown\022\020.ShutdownR" + + "equest\032\021.ShutdownResponse\0225\n\nstopMaster\022" + + "\022.StopMasterRequest\032\023.StopMasterResponse" + + "\022,\n\007balance\022\017.BalanceRequest\032\020.BalanceRe" + + "sponse\022M\n\022setBalancerRunning\022\032.SetBalanc" + + "erRunningRequest\032\033.SetBalancerRunningRes" + + "ponseBB\n*org.apache.hadoop.hbase.protobu", + "f.generatedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -3016,6 +6781,86 @@ public final class MasterProtos { new java.lang.String[] { }, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse.Builder.class); + internal_static_IsMasterRunningRequest_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_IsMasterRunningRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_IsMasterRunningRequest_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.Builder.class); + internal_static_IsMasterRunningResponse_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_IsMasterRunningResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_IsMasterRunningResponse_descriptor, + new java.lang.String[] { "IsMasterRunning", }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder.class); + internal_static_ShutdownRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_ShutdownRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ShutdownRequest_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest.Builder.class); + internal_static_ShutdownResponse_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_ShutdownResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ShutdownResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse.Builder.class); + internal_static_StopMasterRequest_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_StopMasterRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StopMasterRequest_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.class, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.Builder.class); + internal_static_StopMasterResponse_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_StopMasterResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StopMasterResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.Builder.class); + internal_static_BalanceRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_BalanceRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BalanceRequest_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class); + internal_static_BalanceResponse_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_BalanceResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BalanceResponse_descriptor, + new java.lang.String[] { "BalancerRan", }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class); + internal_static_SetBalancerRunningRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_SetBalancerRunningRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SetBalancerRunningRequest_descriptor, + new java.lang.String[] { "On", "Synchronous", }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.Builder.class); + internal_static_SetBalancerRunningResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_SetBalancerRunningResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SetBalancerRunningResponse_descriptor, + new java.lang.String[] { "PrevBalanceValue", }, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.Builder.class); return null; } }; diff --git a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 416d76a6df0..1fc0a932602 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -90,6 +90,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Multimap; import com.google.common.collect.TreeMultimap; +import com.google.protobuf.ServiceException; /** * HBaseFsck (hbck) is a tool for checking and repairing region consistency and @@ -376,7 +377,7 @@ public class HBaseFsck { * Contacts the master and prints out cluster-wide information * @return 0 on success, non-zero on failure */ - public int onlineHbck() throws IOException, KeeperException, InterruptedException { + public int onlineHbck() throws IOException, KeeperException, InterruptedException, ServiceException { // print hbase server 
version errors.print("Version: " + status.getHBaseVersion()); offlineHdfsIntegrityRepair(); diff --git a/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index be52644eec6..8f9eb7dd5a1 100644 --- a/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -57,6 +57,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.*; import org.junit.experimental.categories.Category; +import com.google.protobuf.ServiceException; + /** * Class to test HBaseAdmin. @@ -1574,6 +1576,7 @@ public class TestAdmin { assertTrue(false); } catch (MasterNotRunningException ignored) { } catch (ZooKeeperConnectionException ignored) { + } catch (ServiceException ignored) { } long end = System.currentTimeMillis(); diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java b/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java index 9ff83c50e0f..63c10ed9d76 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.ipc.HMasterInterface; +import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.ipc.RemoteException; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -50,7 +51,7 @@ public class TestHMasterRPCException { try { HMasterInterface inf = (HMasterInterface) HBaseRPC.getProxy( HMasterInterface.class, HMasterInterface.VERSION, isa, conf, 100); - inf.isMasterRunning(); + inf.isMasterRunning(null,RequestConverter.buildIsMasterRunningRequest()); fail(); } catch (RemoteException ex) { assertTrue(ex.getMessage().startsWith( diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index a24f937698f..6ec62f03692 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -52,6 +52,8 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import com.google.protobuf.ServiceException; + /** * Like {@link TestSplitTransaction} in that we're testing {@link SplitTransaction} * only the below tests are against a running cluster where {@link TestSplitTransaction} @@ -104,7 +106,7 @@ public class TestSplitTransactionOnCluster { * @throws DeserializationException */ @Test (timeout = 300000) public void testRSSplitEphemeralsDisappearButDaughtersAreOnlinedAfterShutdownHandling() - throws IOException, InterruptedException, NodeExistsException, KeeperException, DeserializationException { + throws IOException, InterruptedException, NodeExistsException, KeeperException, DeserializationException, ServiceException { final byte [] tableName = Bytes.toBytes("ephemeral"); @@ -174,7 +176,7 @@ public class TestSplitTransactionOnCluster { } @Test (timeout = 300000) public void testExistingZnodeBlocksSplitAndWeRollback() - throws IOException, InterruptedException, NodeExistsException, KeeperException { + throws IOException, InterruptedException, NodeExistsException, KeeperException, 
ServiceException { final byte [] tableName = Bytes.toBytes("testExistingZnodeBlocksSplitAndWeRollback"); @@ -234,7 +236,7 @@ public class TestSplitTransactionOnCluster { * @throws InterruptedException */ @Test (timeout = 300000) public void testShutdownSimpleFixup() - throws IOException, InterruptedException { + throws IOException, InterruptedException, ServiceException { final byte [] tableName = Bytes.toBytes("testShutdownSimpleFixup"); // Create table then get the single region for our new table. @@ -290,7 +292,7 @@ public class TestSplitTransactionOnCluster { * @throws InterruptedException */ @Test (timeout=300000) public void testShutdownFixupWhenDaughterHasSplit() - throws IOException, InterruptedException { + throws IOException, InterruptedException, ServiceException { final byte [] tableName = Bytes.toBytes("testShutdownFixupWhenDaughterHasSplit"); @@ -371,7 +373,7 @@ public class TestSplitTransactionOnCluster { @Test(timeout = 300000) public void testMasterRestartWhenSplittingIsPartial() throws IOException, InterruptedException, NodeExistsException, - KeeperException, DeserializationException { + KeeperException, DeserializationException, ServiceException { final byte[] tableName = Bytes.toBytes("testMasterRestartWhenSplittingIsPartial"); // Create table then get the single region for our new table. @@ -451,7 +453,7 @@ public class TestSplitTransactionOnCluster { @Test (timeout = 300000) public void testMasterRestartAtRegionSplitPendingCatalogJanitor() throws IOException, InterruptedException, NodeExistsException, - KeeperException { + KeeperException, ServiceException { final byte[] tableName = Bytes.toBytes("testMasterRestartAtRegionSplitPendingCatalogJanitor"); // Create table then get the single region for our new table.
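For reference, the blocking stub generated above can be exercised as in the following minimal sketch. Everything here except the channel wiring comes from the generated code in this patch; obtaining a com.google.protobuf.BlockingRpcChannel bound to the active master is assumed and left out:

    import com.google.protobuf.BlockingRpcChannel;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    public class BalancerSwitchSketch {
      // Turns the balancer off through the new setBalancerRunning RPC and
      // returns the previous switch state reported by the master.
      // The BlockingRpcChannel argument is assumed to be wired up elsewhere;
      // it is not part of this patch.
      public static boolean disableBalancer(BlockingRpcChannel channel)
          throws ServiceException {
        MasterProtos.MasterService.BlockingInterface master =
            MasterProtos.MasterService.newBlockingStub(channel);
        MasterProtos.SetBalancerRunningRequest request =
            MasterProtos.SetBalancerRunningRequest.newBuilder()
                .setOn(false)           // required bool on = 1
                .setSynchronous(false)  // optional bool synchronous = 2
                .build();
        MasterProtos.SetBalancerRunningResponse response =
            master.setBalancerRunning(null, request);
        // optional bool prevBalanceValue = 1; defaults to false when unset.
        return response.getPrevBalanceValue();
      }
    }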