diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 5b53a7e07e1..d284fc81dc7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -1823,4 +1824,22 @@ public interface Admin extends Abortable, Closeable {
* @return true if the switch is enabled, false otherwise.
*/
boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException;
+
+ /**
+   * Add a new replication peer for replicating data to a slave cluster
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication slave cluster
+   * @throws IOException if a remote or network exception occurs
+ */
+ default void addReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
+ throws IOException {
+ }
+
+ /**
+ * Remove a peer and stop the replication
+ * @param peerId a short name that identifies the peer
+   * @throws IOException if a remote or network exception occurs
+ */
+ default void removeReplicationPeer(final String peerId) throws IOException {
+ }
}
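
For context, the new Admin methods would be used from client code roughly as follows. This is a minimal sketch assuming a running cluster; the peer id "1" and the cluster key are illustrative placeholders, not values from this patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class ReplicationPeerExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Describe the slave cluster by its ZooKeeper quorum, client port
          // and znode parent (placeholder value).
          ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
          peerConfig.setClusterKey("zk1,zk2,zk3:2181:/hbase");
          admin.addReplicationPeer("1", peerConfig);
          // ... and later, stop replicating to that cluster:
          admin.removeReplicationPeer("1");
        }
      }
    }
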
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 0c512beccbd..4e31f2c7b33 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -88,6 +88,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCa
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ExceptionUtil;
@@ -1637,6 +1641,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
SecurityCapabilitiesRequest request) throws ServiceException {
return stub.getSecurityCapabilities(controller, request);
}
+
+ @Override
+ public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
+ AddReplicationPeerRequest request) throws ServiceException {
+ return stub.addReplicationPeer(controller, request);
+ }
+
+ @Override
+ public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
+ RemoveReplicationPeerRequest request) throws ServiceException {
+ return stub.removeReplicationPeer(controller, request);
+ }
};
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 9bfe2763c3c..19831c1fda0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -3744,4 +3745,29 @@ public class HBaseAdmin implements Admin {
private RpcControllerFactory getRpcControllerFactory() {
return this.rpcControllerFactory;
}
+
+ @Override
+ public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+ throws IOException {
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+ @Override
+ protected Void rpcCall() throws Exception {
+ master.addReplicationPeer(getRpcController(),
+ RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig));
+ return null;
+ }
+ });
+ }
+
+ @Override
+ public void removeReplicationPeer(String peerId) throws IOException {
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+ @Override
+ protected Void rpcCall() throws Exception {
+ master.removeReplicationPeer(getRpcController(),
+ RequestConverter.buildRemoveReplicationPeerRequest(peerId));
+ return null;
+ }
+ });
+ }
}
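
Both overrides follow HBaseAdmin's usual pattern: executeCallable hands the anonymous MasterCallable<Void> to a retrying RPC caller, so rpcCall() may execute more than once before an IOException surfaces. Below is a simplified, self-contained sketch of that retry shape; it is not HBaseAdmin's actual caller, which lives in RpcRetryingCallerFactory and adds configurable pause, backoff, and exception translation:

    import java.io.IOException;
    import java.util.concurrent.Callable;

    final class SimpleRetryingCaller {
      // Rough stand-in for the loop executeCallable delegates to; the real
      // caller also translates remote exceptions and sleeps between attempts.
      static <V> V callWithRetries(Callable<V> call, int maxAttempts) throws IOException {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            return call.call(); // corresponds to MasterCallable.rpcCall()
          } catch (Exception e) {
            last = (e instanceof IOException) ? (IOException) e : new IOException(e);
          }
        }
        throw last;
      }
    }
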
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 25590c51086..e6b9b0d7f24 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
@@ -80,9 +81,12 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
* To see which commands are available in the shell, type
 * <code>replication</code>.
*
+ *
+ * @deprecated use {@link org.apache.hadoop.hbase.client.Admin} instead.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
+@Deprecated
public class ReplicationAdmin implements Closeable {
private static final Log LOG = LogFactory.getLog(ReplicationAdmin.class);
@@ -108,6 +112,8 @@ public class ReplicationAdmin implements Closeable {
*/
private final ZooKeeperWatcher zkw;
+ private Admin admin;
+
/**
* Constructor that creates a connection to the local ZooKeeper ensemble.
* @param conf Configuration to use
@@ -116,6 +122,7 @@ public class ReplicationAdmin implements Closeable {
*/
public ReplicationAdmin(Configuration conf) throws IOException {
this.connection = ConnectionFactory.createConnection(conf);
+ admin = connection.getAdmin();
try {
zkw = createZooKeeperWatcher();
try {
@@ -133,9 +140,7 @@ public class ReplicationAdmin implements Closeable {
throw exception;
}
} catch (Exception exception) {
- if (connection != null) {
- connection.close();
- }
+ connection.close();
if (exception instanceof IOException) {
throw (IOException) exception;
} else if (exception instanceof RuntimeException) {
@@ -176,11 +181,12 @@ public class ReplicationAdmin implements Closeable {
*/
@Deprecated
public void addPeer(String id, ReplicationPeerConfig peerConfig,
-      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
+      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException,
+      IOException {
if (tableCfs != null) {
peerConfig.setTableCFsMap(tableCfs);
}
- this.replicationPeers.registerPeer(id, peerConfig);
+ this.admin.addReplicationPeer(id, peerConfig);
}
/**
@@ -188,10 +194,11 @@ public class ReplicationAdmin implements Closeable {
* @param id a short name that identifies the cluster
* @param peerConfig configuration for the replication slave cluster
*/
- public void addPeer(String id, ReplicationPeerConfig peerConfig) throws ReplicationException {
+ public void addPeer(String id, ReplicationPeerConfig peerConfig) throws ReplicationException,
+ IOException {
checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
peerConfig.getTableCFsMap());
- this.replicationPeers.registerPeer(id, peerConfig);
+ this.admin.addReplicationPeer(id, peerConfig);
}
/**
@@ -213,8 +220,8 @@ public class ReplicationAdmin implements Closeable {
* Removes a peer cluster and stops the replication to it.
* @param id a short name that identifies the cluster
*/
- public void removePeer(String id) throws ReplicationException {
- this.replicationPeers.unregisterPeer(id);
+ public void removePeer(String id) throws IOException {
+ this.admin.removeReplicationPeer(id);
}
/**
@@ -403,6 +410,7 @@ public class ReplicationAdmin implements Closeable {
if (this.connection != null) {
this.connection.close();
}
+ admin.close();
}
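
Because addPeer and removePeer now delegate to the master RPC, their signatures gain IOException, so existing call sites compiled against ReplicationAdmin need a wider catch (new code should call Admin#addReplicationPeer directly, as in the example after Admin.java above). A sketch of an updated call site, with placeholder peer id and cluster key:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
    import org.apache.hadoop.hbase.replication.ReplicationException;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class UpdatedCallSite {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
        peerConfig.setClusterKey("zk1:2181:/hbase");
        try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf)) {
          replicationAdmin.addPeer("1", peerConfig); // now also throws IOException
        } catch (ReplicationException e) {
          // Previously the only checked failure mode (ZooKeeper-based path);
          // IOException from the master RPC now propagates via the throws clause.
          throw new IOException(e);
        }
      }
    }
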
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index bc7a4ced99c..8506cbb257d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -41,10 +41,10 @@ public class ReplicationFactory {
}
public static ReplicationQueuesClient getReplicationQueuesClient(
- ReplicationQueuesClientArguments args)
- throws Exception {
-    Class<?> classToBuild = args.getConf().getClass("hbase.region.replica." +
- "replication.replicationQueuesClient.class", ReplicationQueuesClientZKImpl.class);
+ ReplicationQueuesClientArguments args) throws Exception {
+    Class<?> classToBuild = args.getConf().getClass(
+ "hbase.region.replica.replication.replicationQueuesClient.class",
+ ReplicationQueuesClientZKImpl.class);
return (ReplicationQueuesClient) ConstructorUtils.invokeConstructor(classToBuild, args);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index f938fd0c51c..cd4712abd76 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -44,8 +44,10 @@ import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
@@ -110,6 +112,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
@@ -1560,4 +1565,19 @@ public final class RequestConverter {
}
 throw new UnsupportedOperationException("Unsupported switch type: " + switchType);
}
+
+ public static ReplicationProtos.AddReplicationPeerRequest buildAddReplicationPeerRequest(
+ String peerId, ReplicationPeerConfig peerConfig) {
+ AddReplicationPeerRequest.Builder builder = AddReplicationPeerRequest.newBuilder();
+ builder.setPeerId(peerId);
+ builder.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
+ return builder.build();
+ }
+
+ public static ReplicationProtos.RemoveReplicationPeerRequest buildRemoveReplicationPeerRequest(
+ String peerId) {
+ RemoveReplicationPeerRequest.Builder builder = RemoveReplicationPeerRequest.newBuilder();
+ builder.setPeerId(peerId);
+ return builder.build();
+ }
}
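
A quick sketch of what these converters produce, using only the methods added in this patch; it builds both request messages offline and reads the peer id back (the cluster key is a placeholder):

    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;

    public class RequestConverterSketch {
      public static void main(String[] args) {
        ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
        peerConfig.setClusterKey("zk1:2181:/hbase");
        // The peer id travels as a plain string; the peer config is serialized
        // to its protobuf form by ReplicationSerDeHelper.convert.
        AddReplicationPeerRequest add =
            RequestConverter.buildAddReplicationPeerRequest("1", peerConfig);
        RemoveReplicationPeerRequest remove =
            RequestConverter.buildRemoveReplicationPeerRequest("1");
        System.out.println(add.getPeerId() + " / " + remove.getPeerId());
      }
    }
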
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 56442d1fcab..da5de638e41 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -66344,6 +66344,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request,
      org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
+ /**
+     * <pre>
+     ** Add a replication peer
+     * </pre>
+     *
+     * <code>rpc AddReplicationPeer(.hbase.pb.AddReplicationPeerRequest) returns (.hbase.pb.AddReplicationPeerResponse);</code>
+ */
+ public abstract void addReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done);
+
+ /**
+     * <pre>
+     ** Remove a replication peer
+     * </pre>
+     *
+     * <code>rpc RemoveReplicationPeer(.hbase.pb.RemoveReplicationPeerRequest) returns (.hbase.pb.RemoveReplicationPeerResponse);</code>
+ */
+ public abstract void removeReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done);
+
}
public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService(
@@ -66813,6 +66837,22 @@ public final class MasterProtos {
impl.listProcedures(controller, request, done);
}
+ @java.lang.Override
+ public void addReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done) {
+ impl.addReplicationPeer(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void removeReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done) {
+ impl.removeReplicationPeer(controller, request, done);
+ }
+
};
}
@@ -66951,6 +66991,10 @@ public final class MasterProtos {
return impl.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
case 57:
return impl.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request);
+ case 58:
+ return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request);
+ case 59:
+ return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -67081,6 +67125,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
case 57:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
+ case 58:
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+ case 59:
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -67211,6 +67259,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
case 57:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
+ case 58:
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+ case 59:
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -67944,6 +67996,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request,
      org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
+ /**
+     * <pre>
+     ** Add a replication peer
+     * </pre>
+     *
+     * <code>rpc AddReplicationPeer(.hbase.pb.AddReplicationPeerRequest) returns (.hbase.pb.AddReplicationPeerResponse);</code>
+ */
+ public abstract void addReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done);
+
+ /**
+     * <pre>
+     ** Remove a replication peer
+     * </pre>
+     *
+     * <code>rpc RemoveReplicationPeer(.hbase.pb.RemoveReplicationPeerRequest) returns (.hbase.pb.RemoveReplicationPeerResponse);</code>
+ */
+ public abstract void removeReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done);
+
public static final
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -68256,6 +68332,16 @@ public final class MasterProtos {
          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse>specializeCallback(
done));
return;
+ case 58:
+ this.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse>specializeCallback(
+ done));
+ return;
+ case 59:
+ this.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse>specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -68386,6 +68472,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
case 57:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
+ case 58:
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+ case 59:
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -68516,6 +68606,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
case 57:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
+ case 58:
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+ case 59:
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -69406,6 +69500,36 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.class,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()));
}
+
+ public void addReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(58),
+ controller,
+ request,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance(),
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.class,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance()));
+ }
+
+ public void removeReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(59),
+ controller,
+ request,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance(),
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.class,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -69703,6 +69827,16 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse removeReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -70407,6 +70541,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(58),
+ controller,
+ request,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse removeReplicationPeer(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(59),
+ controller,
+ request,
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
@@ -70989,340 +71147,346 @@ public final class MasterProtos {
"\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014C" +
"lient.proto\032\023ClusterStatus.proto\032\023ErrorH" +
"andling.proto\032\017Procedure.proto\032\013Quota.pr" +
- "oto\"\234\001\n\020AddColumnRequest\022\'\n\ntable_name\030\001" +
- " \002(\0132\023.hbase.pb.TableName\0225\n\017column_fami" +
- "lies\030\002 \002(\0132\034.hbase.pb.ColumnFamilySchema" +
- "\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:" +
- "\0010\"$\n\021AddColumnResponse\022\017\n\007proc_id\030\001 \001(\004" +
- "\"}\n\023DeleteColumnRequest\022\'\n\ntable_name\030\001 " +
- "\002(\0132\023.hbase.pb.TableName\022\023\n\013column_name\030",
- "\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" +
- " \001(\004:\0010\"\'\n\024DeleteColumnResponse\022\017\n\007proc_" +
- "id\030\001 \001(\004\"\237\001\n\023ModifyColumnRequest\022\'\n\ntabl" +
- "e_name\030\001 \002(\0132\023.hbase.pb.TableName\0225\n\017col" +
- "umn_families\030\002 \002(\0132\034.hbase.pb.ColumnFami" +
- "lySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonc" +
- "e\030\004 \001(\004:\0010\"\'\n\024ModifyColumnResponse\022\017\n\007pr" +
- "oc_id\030\001 \001(\004\"n\n\021MoveRegionRequest\022)\n\006regi" +
- "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\020d" +
- "est_server_name\030\002 \001(\0132\024.hbase.pb.ServerN",
- "ame\"\024\n\022MoveRegionResponse\"\274\001\n\035DispatchMe" +
- "rgingRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031." +
- "hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 \002" +
- "(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcibl" +
- "e\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020" +
- "\n\005nonce\030\005 \001(\004:\0010\"1\n\036DispatchMergingRegio" +
- "nsResponse\022\017\n\007proc_id\030\001 \001(\004\"\210\001\n\030MergeTab" +
- "leRegionsRequest\022)\n\006region\030\001 \003(\0132\031.hbase" +
- ".pb.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005f" +
- "alse\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 ",
- "\001(\004:\0010\",\n\031MergeTableRegionsResponse\022\017\n\007p" +
- "roc_id\030\001 \001(\004\"@\n\023AssignRegionRequest\022)\n\006r" +
- "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"\026" +
- "\n\024AssignRegionResponse\"X\n\025UnassignRegion" +
- "Request\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regio" +
- "nSpecifier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026Unas" +
- "signRegionResponse\"A\n\024OfflineRegionReque" +
- "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
- "ifier\"\027\n\025OfflineRegionResponse\"\177\n\022Create" +
- "TableRequest\022+\n\014table_schema\030\001 \002(\0132\025.hba",
- "se.pb.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\022\026\n" +
- "\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"" +
- "&\n\023CreateTableResponse\022\017\n\007proc_id\030\001 \001(\004\"" +
- "g\n\022DeleteTableRequest\022\'\n\ntable_name\030\001 \002(" +
- "\0132\023.hbase.pb.TableName\022\026\n\013nonce_group\030\002 " +
- "\001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023DeleteTableR" +
- "esponse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024TruncateTab" +
- "leRequest\022&\n\ttableName\030\001 \002(\0132\023.hbase.pb." +
- "TableName\022\035\n\016preserveSplits\030\002 \001(\010:\005false" +
- "\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:",
- "\0010\"(\n\025TruncateTableResponse\022\017\n\007proc_id\030\001" +
- " \001(\004\"g\n\022EnableTableRequest\022\'\n\ntable_name" +
- "\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonce_gro" +
- "up\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023EnableT" +
- "ableResponse\022\017\n\007proc_id\030\001 \001(\004\"h\n\023Disable" +
- "TableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase" +
- ".pb.TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n" +
- "\005nonce\030\003 \001(\004:\0010\"\'\n\024DisableTableResponse\022" +
- "\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022ModifyTableRequest\022" +
- "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName",
- "\022+\n\014table_schema\030\002 \002(\0132\025.hbase.pb.TableS" +
- "chema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" +
- " \001(\004:\0010\"&\n\023ModifyTableResponse\022\017\n\007proc_i" +
- "d\030\001 \001(\004\"~\n\026CreateNamespaceRequest\022:\n\023nam" +
- "espaceDescriptor\030\001 \002(\0132\035.hbase.pb.Namesp" +
- "aceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n" +
- "\005nonce\030\003 \001(\004:\0010\"*\n\027CreateNamespaceRespon" +
- "se\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteNamespaceRe" +
- "quest\022\025\n\rnamespaceName\030\001 \002(\t\022\026\n\013nonce_gr" +
- "oup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Delete",
- "NamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026Mo" +
- "difyNamespaceRequest\022:\n\023namespaceDescrip" +
- "tor\030\001 \002(\0132\035.hbase.pb.NamespaceDescriptor" +
- "\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:" +
- "\0010\"*\n\027ModifyNamespaceResponse\022\017\n\007proc_id" +
- "\030\001 \001(\004\"6\n\035GetNamespaceDescriptorRequest\022" +
- "\025\n\rnamespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDe" +
- "scriptorResponse\022:\n\023namespaceDescriptor\030" +
- "\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037" +
- "ListNamespaceDescriptorsRequest\"^\n ListN",
- "amespaceDescriptorsResponse\022:\n\023namespace" +
- "Descriptor\030\001 \003(\0132\035.hbase.pb.NamespaceDes" +
- "criptor\"?\n&ListTableDescriptorsByNamespa" +
- "ceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'List" +
- "TableDescriptorsByNamespaceResponse\022*\n\013t" +
- "ableSchema\030\001 \003(\0132\025.hbase.pb.TableSchema\"" +
- "9\n ListTableNamesByNamespaceRequest\022\025\n\rn" +
- "amespaceName\030\001 \002(\t\"K\n!ListTableNamesByNa" +
- "mespaceResponse\022&\n\ttableName\030\001 \003(\0132\023.hba" +
- "se.pb.TableName\"\021\n\017ShutdownRequest\"\022\n\020Sh",
- "utdownResponse\"\023\n\021StopMasterRequest\"\024\n\022S" +
- "topMasterResponse\"\034\n\032IsInMaintenanceMode" +
- "Request\"8\n\033IsInMaintenanceModeResponse\022\031" +
- "\n\021inMaintenanceMode\030\001 \002(\010\"\037\n\016BalanceRequ" +
- "est\022\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n" +
- "\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunnin" +
- "gRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(" +
- "\010\"8\n\032SetBalancerRunningResponse\022\032\n\022prev_" +
- "balance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabled" +
- "Request\",\n\031IsBalancerEnabledResponse\022\017\n\007",
- "enabled\030\001 \002(\010\"w\n\035SetSplitOrMergeEnabledR" +
- "equest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002" +
- " \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb.Ma" +
- "sterSwitchType\"4\n\036SetSplitOrMergeEnabled" +
- "Response\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSplitO" +
- "rMergeEnabledRequest\022/\n\013switch_type\030\001 \002(" +
- "\0162\032.hbase.pb.MasterSwitchType\"0\n\035IsSplit" +
- "OrMergeEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"" +
- "\022\n\020NormalizeRequest\"+\n\021NormalizeResponse" +
- "\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormalize",
- "rRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormal" +
- "izerRunningResponse\022\035\n\025prev_normalizer_v" +
- "alue\030\001 \001(\010\"\034\n\032IsNormalizerEnabledRequest" +
- "\".\n\033IsNormalizerEnabledResponse\022\017\n\007enabl" +
- "ed\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026Run" +
- "CatalogScanResponse\022\023\n\013scan_result\030\001 \001(\005" +
- "\"-\n\033EnableCatalogJanitorRequest\022\016\n\006enabl" +
- "e\030\001 \002(\010\"2\n\034EnableCatalogJanitorResponse\022" +
- "\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanitorE" +
- "nabledRequest\"0\n\037IsCatalogJanitorEnabled",
- "Response\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotReque" +
- "st\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapshot" +
- "Description\",\n\020SnapshotResponse\022\030\n\020expec" +
- "ted_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapsho" +
- "tsRequest\"Q\n\035GetCompletedSnapshotsRespon" +
- "se\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.Snapsho" +
- "tDescription\"H\n\025DeleteSnapshotRequest\022/\n" +
+ "oto\032\021Replication.proto\"\234\001\n\020AddColumnRequ" +
+ "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" +
+ "Name\0225\n\017column_families\030\002 \002(\0132\034.hbase.pb" +
+ ".ColumnFamilySchema\022\026\n\013nonce_group\030\003 \001(\004" +
+ ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"$\n\021AddColumnRespon" +
+ "se\022\017\n\007proc_id\030\001 \001(\004\"}\n\023DeleteColumnReque" +
+ "st\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableN",
+ "ame\022\023\n\013column_name\030\002 \002(\014\022\026\n\013nonce_group\030" +
+ "\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024DeleteColu" +
+ "mnResponse\022\017\n\007proc_id\030\001 \001(\004\"\237\001\n\023ModifyCo" +
+ "lumnRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase." +
+ "pb.TableName\0225\n\017column_families\030\002 \002(\0132\034." +
+ "hbase.pb.ColumnFamilySchema\022\026\n\013nonce_gro" +
+ "up\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024ModifyC" +
+ "olumnResponse\022\017\n\007proc_id\030\001 \001(\004\"n\n\021MoveRe" +
+ "gionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" +
+ "egionSpecifier\022.\n\020dest_server_name\030\002 \001(\013",
+ "2\024.hbase.pb.ServerName\"\024\n\022MoveRegionResp" +
+ "onse\"\274\001\n\035DispatchMergingRegionsRequest\022+" +
+ "\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpecif" +
+ "ier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionS" +
+ "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\026\n\013non" +
+ "ce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\"1\n\036D" +
+ "ispatchMergingRegionsResponse\022\017\n\007proc_id" +
+ "\030\001 \001(\004\"\210\001\n\030MergeTableRegionsRequest\022)\n\006r" +
+ "egion\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\022\027" +
+ "\n\010forcible\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004",
+ " \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\",\n\031MergeTableR" +
+ "egionsResponse\022\017\n\007proc_id\030\001 \001(\004\"@\n\023Assig" +
+ "nRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.p" +
+ "b.RegionSpecifier\"\026\n\024AssignRegionRespons" +
+ "e\"X\n\025UnassignRegionRequest\022)\n\006region\030\001 \002" +
+ "(\0132\031.hbase.pb.RegionSpecifier\022\024\n\005force\030\002" +
+ " \001(\010:\005false\"\030\n\026UnassignRegionResponse\"A\n" +
+ "\024OfflineRegionRequest\022)\n\006region\030\001 \002(\0132\031." +
+ "hbase.pb.RegionSpecifier\"\027\n\025OfflineRegio" +
+ "nResponse\"\177\n\022CreateTableRequest\022+\n\014table",
+ "_schema\030\001 \002(\0132\025.hbase.pb.TableSchema\022\022\n\n" +
+ "split_keys\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010" +
+ "\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023CreateTableRespons" +
+ "e\022\017\n\007proc_id\030\001 \001(\004\"g\n\022DeleteTableRequest" +
+ "\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNam" +
+ "e\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004" +
+ ":\0010\"&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001 " +
+ "\001(\004\"\207\001\n\024TruncateTableRequest\022&\n\ttableNam" +
+ "e\030\001 \002(\0132\023.hbase.pb.TableName\022\035\n\016preserve" +
+ "Splits\030\002 \001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004",
+ ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025TruncateTableRe" +
+ "sponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022EnableTableRe" +
+ "quest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Tab" +
+ "leName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" +
+ "\003 \001(\004:\0010\"&\n\023EnableTableResponse\022\017\n\007proc_" +
+ "id\030\001 \001(\004\"h\n\023DisableTableRequest\022\'\n\ntable" +
+ "_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonc" +
+ "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024Di" +
+ "sableTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022" +
+ "ModifyTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023",
+ ".hbase.pb.TableName\022+\n\014table_schema\030\002 \002(" +
+ "\0132\025.hbase.pb.TableSchema\022\026\n\013nonce_group\030" +
+ "\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTabl" +
+ "eResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateName" +
+ "spaceRequest\022:\n\023namespaceDescriptor\030\001 \002(" +
+ "\0132\035.hbase.pb.NamespaceDescriptor\022\026\n\013nonc" +
+ "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Cr" +
+ "eateNamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y" +
+ "\n\026DeleteNamespaceRequest\022\025\n\rnamespaceNam" +
+ "e\030\001 \002(\t\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce",
+ "\030\003 \001(\004:\0010\"*\n\027DeleteNamespaceResponse\022\017\n\007" +
+ "proc_id\030\001 \001(\004\"~\n\026ModifyNamespaceRequest\022" +
+ ":\n\023namespaceDescriptor\030\001 \002(\0132\035.hbase.pb." +
+ "NamespaceDescriptor\022\026\n\013nonce_group\030\002 \001(\004" +
+ ":\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027ModifyNamespace" +
+ "Response\022\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespac" +
+ "eDescriptorRequest\022\025\n\rnamespaceName\030\001 \002(" +
+ "\t\"\\\n\036GetNamespaceDescriptorResponse\022:\n\023n" +
+ "amespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Name" +
+ "spaceDescriptor\"!\n\037ListNamespaceDescript",
+ "orsRequest\"^\n ListNamespaceDescriptorsRe" +
+ "sponse\022:\n\023namespaceDescriptor\030\001 \003(\0132\035.hb" +
+ "ase.pb.NamespaceDescriptor\"?\n&ListTableD" +
+ "escriptorsByNamespaceRequest\022\025\n\rnamespac" +
+ "eName\030\001 \002(\t\"U\n\'ListTableDescriptorsByNam" +
+ "espaceResponse\022*\n\013tableSchema\030\001 \003(\0132\025.hb" +
+ "ase.pb.TableSchema\"9\n ListTableNamesByNa" +
+ "mespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n" +
+ "!ListTableNamesByNamespaceResponse\022&\n\tta" +
+ "bleName\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Sh",
+ "utdownRequest\"\022\n\020ShutdownResponse\"\023\n\021Sto" +
+ "pMasterRequest\"\024\n\022StopMasterResponse\"\034\n\032" +
+ "IsInMaintenanceModeRequest\"8\n\033IsInMainte" +
+ "nanceModeResponse\022\031\n\021inMaintenanceMode\030\001" +
+ " \002(\010\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n" +
+ "\017BalanceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<" +
+ "\n\031SetBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022" +
+ "\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBalancerRunni" +
+ "ngResponse\022\032\n\022prev_balance_value\030\001 \001(\010\"\032" +
+ "\n\030IsBalancerEnabledRequest\",\n\031IsBalancer",
+ "EnabledResponse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetS" +
+ "plitOrMergeEnabledRequest\022\017\n\007enabled\030\001 \002" +
+ "(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014switch_types\030" +
+ "\003 \003(\0162\032.hbase.pb.MasterSwitchType\"4\n\036Set" +
+ "SplitOrMergeEnabledResponse\022\022\n\nprev_valu" +
+ "e\030\001 \003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022" +
+ "/\n\013switch_type\030\001 \002(\0162\032.hbase.pb.MasterSw" +
+ "itchType\"0\n\035IsSplitOrMergeEnabledRespons" +
+ "e\022\017\n\007enabled\030\001 \002(\010\"\022\n\020NormalizeRequest\"+" +
+ "\n\021NormalizeResponse\022\026\n\016normalizer_ran\030\001 ",
+ "\002(\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on" +
+ "\030\001 \002(\010\"=\n\034SetNormalizerRunningResponse\022\035" +
+ "\n\025prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNorma" +
+ "lizerEnabledRequest\".\n\033IsNormalizerEnabl" +
+ "edResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalo" +
+ "gScanRequest\"-\n\026RunCatalogScanResponse\022\023" +
+ "\n\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJani" +
+ "torRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCata" +
+ "logJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" " +
+ "\n\036IsCatalogJanitorEnabledRequest\"0\n\037IsCa",
+ "talogJanitorEnabledResponse\022\r\n\005value\030\001 \002" +
+ "(\010\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132" +
+ "\035.hbase.pb.SnapshotDescription\",\n\020Snapsh" +
+ "otResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034" +
+ "GetCompletedSnapshotsRequest\"Q\n\035GetCompl" +
+ "etedSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\013" +
+ "2\035.hbase.pb.SnapshotDescription\"H\n\025Delet" +
+ "eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" +
+ "se.pb.SnapshotDescription\"\030\n\026DeleteSnaps" +
+ "hotResponse\"s\n\026RestoreSnapshotRequest\022/\n",
"\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" +
- "iption\"\030\n\026DeleteSnapshotResponse\"s\n\026Rest" +
- "oreSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.h",
- "base.pb.SnapshotDescription\022\026\n\013nonce_gro" +
- "up\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Restore" +
- "SnapshotResponse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsS" +
- "napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" +
- "base.pb.SnapshotDescription\"^\n\026IsSnapsho" +
- "tDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010sn" +
- "apshot\030\002 \001(\0132\035.hbase.pb.SnapshotDescript" +
- "ion\"O\n\034IsRestoreSnapshotDoneRequest\022/\n\010s" +
- "napshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescrip" +
- "tion\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n",
- "\004done\030\001 \001(\010:\005false\"F\n\033GetSchemaAlterStat" +
- "usRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" +
- ".TableName\"T\n\034GetSchemaAlterStatusRespon" +
- "se\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtot" +
- "al_regions\030\002 \001(\r\"\213\001\n\032GetTableDescriptors" +
- "Request\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb." +
- "TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_" +
- "tables\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J" +
- "\n\033GetTableDescriptorsResponse\022+\n\014table_s" +
- "chema\030\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024Ge",
- "tTableNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022inc" +
- "lude_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespac" +
- "e\030\003 \001(\t\"A\n\025GetTableNamesResponse\022(\n\013tabl" +
- "e_names\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024Ge" +
- "tTableStateRequest\022\'\n\ntable_name\030\001 \002(\0132\023" +
- ".hbase.pb.TableName\"B\n\025GetTableStateResp" +
- "onse\022)\n\013table_state\030\001 \002(\0132\024.hbase.pb.Tab" +
- "leState\"\031\n\027GetClusterStatusRequest\"K\n\030Ge" +
- "tClusterStatusResponse\022/\n\016cluster_status" +
- "\030\001 \002(\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMas",
- "terRunningRequest\"4\n\027IsMasterRunningResp" +
- "onse\022\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecP" +
- "rocedureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hba" +
- "se.pb.ProcedureDescription\"F\n\025ExecProced" +
- "ureResponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n" +
- "\013return_data\030\002 \001(\014\"K\n\026IsProcedureDoneReq" +
- "uest\0221\n\tprocedure\030\001 \001(\0132\036.hbase.pb.Proce" +
- "dureDescription\"`\n\027IsProcedureDoneRespon" +
- "se\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(" +
- "\0132\036.hbase.pb.ProcedureDescription\",\n\031Get",
- "ProcedureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"" +
- "\371\001\n\032GetProcedureResultResponse\0229\n\005state\030" +
- "\001 \002(\0162*.hbase.pb.GetProcedureResultRespo" +
- "nse.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_up" +
- "date\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030" +
- "\005 \001(\0132!.hbase.pb.ForeignExceptionMessage" +
- "\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n" +
- "\010FINISHED\020\002\"M\n\025AbortProcedureRequest\022\017\n\007" +
- "proc_id\030\001 \002(\004\022#\n\025mayInterruptIfRunning\030\002" +
- " \001(\010:\004true\"6\n\026AbortProcedureResponse\022\034\n\024",
- "is_procedure_aborted\030\001 \002(\010\"\027\n\025ListProced" +
- "uresRequest\"@\n\026ListProceduresResponse\022&\n" +
- "\tprocedure\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001" +
- "\n\017SetQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\n" +
- "user_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\nt" +
- "able_name\030\004 \001(\0132\023.hbase.pb.TableName\022\022\n\n" +
- "remove_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010" +
- "\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.ThrottleRe" +
- "quest\"\022\n\020SetQuotaResponse\"J\n\037MajorCompac" +
- "tionTimestampRequest\022\'\n\ntable_name\030\001 \002(\013",
- "2\023.hbase.pb.TableName\"U\n(MajorCompaction" +
- "TimestampForRegionRequest\022)\n\006region\030\001 \002(" +
- "\0132\031.hbase.pb.RegionSpecifier\"@\n MajorCom" +
- "pactionTimestampResponse\022\034\n\024compaction_t" +
- "imestamp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRe" +
- "quest\"\354\001\n\034SecurityCapabilitiesResponse\022G" +
- "\n\014capabilities\030\001 \003(\01621.hbase.pb.Security" +
- "CapabilitiesResponse.Capability\"\202\001\n\nCapa" +
- "bility\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SEC" +
- "URE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022",
- "\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILI" +
- "TY\020\004*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005M" +
- "ERGE\020\0012\261)\n\rMasterService\022e\n\024GetSchemaAlt" +
- "erStatus\022%.hbase.pb.GetSchemaAlterStatus" +
- "Request\032&.hbase.pb.GetSchemaAlterStatusR" +
- "esponse\022b\n\023GetTableDescriptors\022$.hbase.p" +
- "b.GetTableDescriptorsRequest\032%.hbase.pb." +
- "GetTableDescriptorsResponse\022P\n\rGetTableN" +
- "ames\022\036.hbase.pb.GetTableNamesRequest\032\037.h" +
- "base.pb.GetTableNamesResponse\022Y\n\020GetClus",
- "terStatus\022!.hbase.pb.GetClusterStatusReq" +
- "uest\032\".hbase.pb.GetClusterStatusResponse" +
- "\022V\n\017IsMasterRunning\022 .hbase.pb.IsMasterR" +
- "unningRequest\032!.hbase.pb.IsMasterRunning" +
- "Response\022D\n\tAddColumn\022\032.hbase.pb.AddColu" +
- "mnRequest\032\033.hbase.pb.AddColumnResponse\022M" +
- "\n\014DeleteColumn\022\035.hbase.pb.DeleteColumnRe" +
- "quest\032\036.hbase.pb.DeleteColumnResponse\022M\n" +
- "\014ModifyColumn\022\035.hbase.pb.ModifyColumnReq" +
- "uest\032\036.hbase.pb.ModifyColumnResponse\022G\n\n",
- "MoveRegion\022\033.hbase.pb.MoveRegionRequest\032" +
- "\034.hbase.pb.MoveRegionResponse\022k\n\026Dispatc" +
- "hMergingRegions\022\'.hbase.pb.DispatchMergi" +
- "ngRegionsRequest\032(.hbase.pb.DispatchMerg" +
- "ingRegionsResponse\022\\\n\021MergeTableRegions\022" +
- "\".hbase.pb.MergeTableRegionsRequest\032#.hb" +
- "ase.pb.MergeTableRegionsResponse\022M\n\014Assi" +
- "gnRegion\022\035.hbase.pb.AssignRegionRequest\032" +
- "\036.hbase.pb.AssignRegionResponse\022S\n\016Unass" +
- "ignRegion\022\037.hbase.pb.UnassignRegionReque",
- "st\032 .hbase.pb.UnassignRegionResponse\022P\n\r" +
- "OfflineRegion\022\036.hbase.pb.OfflineRegionRe" +
- "quest\032\037.hbase.pb.OfflineRegionResponse\022J" +
- "\n\013DeleteTable\022\034.hbase.pb.DeleteTableRequ" +
- "est\032\035.hbase.pb.DeleteTableResponse\022P\n\rtr" +
- "uncateTable\022\036.hbase.pb.TruncateTableRequ" +
- "est\032\037.hbase.pb.TruncateTableResponse\022J\n\013" +
- "EnableTable\022\034.hbase.pb.EnableTableReques" +
- "t\032\035.hbase.pb.EnableTableResponse\022M\n\014Disa" +
- "bleTable\022\035.hbase.pb.DisableTableRequest\032",
- "\036.hbase.pb.DisableTableResponse\022J\n\013Modif" +
- "yTable\022\034.hbase.pb.ModifyTableRequest\032\035.h" +
- "base.pb.ModifyTableResponse\022J\n\013CreateTab" +
- "le\022\034.hbase.pb.CreateTableRequest\032\035.hbase" +
- ".pb.CreateTableResponse\022A\n\010Shutdown\022\031.hb" +
- "ase.pb.ShutdownRequest\032\032.hbase.pb.Shutdo" +
- "wnResponse\022G\n\nStopMaster\022\033.hbase.pb.Stop" +
- "MasterRequest\032\034.hbase.pb.StopMasterRespo" +
- "nse\022h\n\031IsMasterInMaintenanceMode\022$.hbase" +
- ".pb.IsInMaintenanceModeRequest\032%.hbase.p",
- "b.IsInMaintenanceModeResponse\022>\n\007Balance" +
- "\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" +
- "lanceResponse\022_\n\022SetBalancerRunning\022#.hb" +
- "ase.pb.SetBalancerRunningRequest\032$.hbase" +
- ".pb.SetBalancerRunningResponse\022\\\n\021IsBala" +
- "ncerEnabled\022\".hbase.pb.IsBalancerEnabled" +
- "Request\032#.hbase.pb.IsBalancerEnabledResp" +
- "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p" +
- "b.SetSplitOrMergeEnabledRequest\032(.hbase." +
- "pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS",
- "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM" +
- "ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" +
- "ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" +
- ".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" +
- "eResponse\022e\n\024SetNormalizerRunning\022%.hbas" +
- "e.pb.SetNormalizerRunningRequest\032&.hbase" +
- ".pb.SetNormalizerRunningResponse\022b\n\023IsNo" +
- "rmalizerEnabled\022$.hbase.pb.IsNormalizerE" +
- "nabledRequest\032%.hbase.pb.IsNormalizerEna" +
- "bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p",
- "b.RunCatalogScanRequest\032 .hbase.pb.RunCa" +
- "talogScanResponse\022e\n\024EnableCatalogJanito" +
- "r\022%.hbase.pb.EnableCatalogJanitorRequest" +
- "\032&.hbase.pb.EnableCatalogJanitorResponse" +
- "\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" +
- "sCatalogJanitorEnabledRequest\032).hbase.pb" +
- ".IsCatalogJanitorEnabledResponse\022^\n\021Exec" +
- "MasterService\022#.hbase.pb.CoprocessorServ" +
- "iceRequest\032$.hbase.pb.CoprocessorService" +
- "Response\022A\n\010Snapshot\022\031.hbase.pb.Snapshot",
- "Request\032\032.hbase.pb.SnapshotResponse\022h\n\025G" +
- "etCompletedSnapshots\022&.hbase.pb.GetCompl" +
- "etedSnapshotsRequest\032\'.hbase.pb.GetCompl" +
- "etedSnapshotsResponse\022S\n\016DeleteSnapshot\022" +
- "\037.hbase.pb.DeleteSnapshotRequest\032 .hbase" +
- ".pb.DeleteSnapshotResponse\022S\n\016IsSnapshot" +
- "Done\022\037.hbase.pb.IsSnapshotDoneRequest\032 ." +
- "hbase.pb.IsSnapshotDoneResponse\022V\n\017Resto" +
- "reSnapshot\022 .hbase.pb.RestoreSnapshotReq" +
- "uest\032!.hbase.pb.RestoreSnapshotResponse\022",
- "P\n\rExecProcedure\022\036.hbase.pb.ExecProcedur" +
- "eRequest\032\037.hbase.pb.ExecProcedureRespons" +
- "e\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exe" +
- "cProcedureRequest\032\037.hbase.pb.ExecProcedu" +
- "reResponse\022V\n\017IsProcedureDone\022 .hbase.pb" +
- ".IsProcedureDoneRequest\032!.hbase.pb.IsPro" +
- "cedureDoneResponse\022V\n\017ModifyNamespace\022 ." +
- "hbase.pb.ModifyNamespaceRequest\032!.hbase." +
- "pb.ModifyNamespaceResponse\022V\n\017CreateName" +
- "space\022 .hbase.pb.CreateNamespaceRequest\032",
- "!.hbase.pb.CreateNamespaceResponse\022V\n\017De" +
- "leteNamespace\022 .hbase.pb.DeleteNamespace" +
- "Request\032!.hbase.pb.DeleteNamespaceRespon" +
- "se\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb." +
- "GetNamespaceDescriptorRequest\032(.hbase.pb" +
- ".GetNamespaceDescriptorResponse\022q\n\030ListN" +
- "amespaceDescriptors\022).hbase.pb.ListNames" +
- "paceDescriptorsRequest\032*.hbase.pb.ListNa" +
- "mespaceDescriptorsResponse\022\206\001\n\037ListTable" +
- "DescriptorsByNamespace\0220.hbase.pb.ListTa",
- "bleDescriptorsByNamespaceRequest\0321.hbase" +
- ".pb.ListTableDescriptorsByNamespaceRespo" +
- "nse\022t\n\031ListTableNamesByNamespace\022*.hbase" +
- ".pb.ListTableNamesByNamespaceRequest\032+.h" +
- "base.pb.ListTableNamesByNamespaceRespons" +
- "e\022P\n\rGetTableState\022\036.hbase.pb.GetTableSt" +
- "ateRequest\032\037.hbase.pb.GetTableStateRespo" +
- "nse\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReque" +
- "st\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLas" +
- "tMajorCompactionTimestamp\022).hbase.pb.Maj",
- "orCompactionTimestampRequest\032*.hbase.pb." +
- "MajorCompactionTimestampResponse\022\212\001\n(get" +
- "LastMajorCompactionTimestampForRegion\0222." +
- "hbase.pb.MajorCompactionTimestampForRegi" +
- "onRequest\032*.hbase.pb.MajorCompactionTime" +
- "stampResponse\022_\n\022getProcedureResult\022#.hb" +
- "ase.pb.GetProcedureResultRequest\032$.hbase" +
- ".pb.GetProcedureResultResponse\022h\n\027getSec" +
- "urityCapabilities\022%.hbase.pb.SecurityCap" +
- "abilitiesRequest\032&.hbase.pb.SecurityCapa",
- "bilitiesResponse\022S\n\016AbortProcedure\022\037.hba" +
- "se.pb.AbortProcedureRequest\032 .hbase.pb.A" +
- "bortProcedureResponse\022S\n\016ListProcedures\022" +
- "\037.hbase.pb.ListProceduresRequest\032 .hbase" +
- ".pb.ListProceduresResponseBI\n1org.apache" +
- ".hadoop.hbase.shaded.protobuf.generatedB" +
- "\014MasterProtosH\001\210\001\001\240\001\001"
+ "iption\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" +
+ "\003 \001(\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007p" +
+ "roc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n" +
+ "\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescr" +
+ "iption\"^\n\026IsSnapshotDoneResponse\022\023\n\004done" +
+ "\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase." +
+ "pb.SnapshotDescription\"O\n\034IsRestoreSnaps" +
+ "hotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase" +
+ ".pb.SnapshotDescription\"4\n\035IsRestoreSnap",
+ "shotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n" +
+ "\033GetSchemaAlterStatusRequest\022\'\n\ntable_na" +
+ "me\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034GetSche" +
+ "maAlterStatusResponse\022\035\n\025yet_to_update_r" +
+ "egions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032" +
+ "GetTableDescriptorsRequest\022(\n\013table_name" +
+ "s\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 " +
+ "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" +
+ "\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescriptors" +
+ "Response\022+\n\014table_schema\030\001 \003(\0132\025.hbase.p",
+ "b.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" +
+ "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" +
+ ":\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNa" +
+ "mesResponse\022(\n\013table_names\030\001 \003(\0132\023.hbase" +
+ ".pb.TableName\"?\n\024GetTableStateRequest\022\'\n" +
+ "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"B" +
+ "\n\025GetTableStateResponse\022)\n\013table_state\030\001" +
+ " \002(\0132\024.hbase.pb.TableState\"\031\n\027GetCluster" +
+ "StatusRequest\"K\n\030GetClusterStatusRespons" +
+ "e\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clu",
+ "sterStatus\"\030\n\026IsMasterRunningRequest\"4\n\027" +
+ "IsMasterRunningResponse\022\031\n\021is_master_run" +
+ "ning\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tpr" +
+ "ocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDescri" +
+ "ption\"F\n\025ExecProcedureResponse\022\030\n\020expect" +
+ "ed_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n" +
+ "\026IsProcedureDoneRequest\0221\n\tprocedure\030\001 \001" +
+ "(\0132\036.hbase.pb.ProcedureDescription\"`\n\027Is" +
+ "ProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005fal" +
+ "se\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Procedur",
+ "eDescription\",\n\031GetProcedureResultReques" +
+ "t\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResul" +
+ "tResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetP" +
+ "rocedureResultResponse.State\022\022\n\nstart_ti" +
+ "me\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030" +
+ "\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Fore" +
+ "ignExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUN" +
+ "D\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortP" +
+ "rocedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayI" +
+ "nterruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortPr",
+ "ocedureResponse\022\034\n\024is_procedure_aborted\030" +
+ "\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListPr" +
+ "oceduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hb" +
+ "ase.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\t" +
+ "user_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tn" +
+ "amespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hba" +
+ "se.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016b" +
+ "ypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031." +
+ "hbase.pb.ThrottleRequest\"\022\n\020SetQuotaResp" +
+ "onse\"J\n\037MajorCompactionTimestampRequest\022",
+ "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName" +
+ "\"U\n(MajorCompactionTimestampForRegionReq" +
+ "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
+ "ecifier\"@\n MajorCompactionTimestampRespo" +
+ "nse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Sec" +
+ "urityCapabilitiesRequest\"\354\001\n\034SecurityCap" +
+ "abilitiesResponse\022G\n\014capabilities\030\001 \003(\0162" +
+ "1.hbase.pb.SecurityCapabilitiesResponse." +
+ "Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTH" +
+ "ENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022",
+ "\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION" +
+ "\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchT" +
+ "ype\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\374*\n\rMasterServ" +
+ "ice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb.G" +
+ "etSchemaAlterStatusRequest\032&.hbase.pb.Ge" +
+ "tSchemaAlterStatusResponse\022b\n\023GetTableDe" +
+ "scriptors\022$.hbase.pb.GetTableDescriptors" +
+ "Request\032%.hbase.pb.GetTableDescriptorsRe" +
+ "sponse\022P\n\rGetTableNames\022\036.hbase.pb.GetTa" +
+ "bleNamesRequest\032\037.hbase.pb.GetTableNames",
+ "Response\022Y\n\020GetClusterStatus\022!.hbase.pb." +
+ "GetClusterStatusRequest\032\".hbase.pb.GetCl" +
+ "usterStatusResponse\022V\n\017IsMasterRunning\022 " +
+ ".hbase.pb.IsMasterRunningRequest\032!.hbase" +
+ ".pb.IsMasterRunningResponse\022D\n\tAddColumn" +
+ "\022\032.hbase.pb.AddColumnRequest\032\033.hbase.pb." +
+ "AddColumnResponse\022M\n\014DeleteColumn\022\035.hbas" +
+ "e.pb.DeleteColumnRequest\032\036.hbase.pb.Dele" +
+ "teColumnResponse\022M\n\014ModifyColumn\022\035.hbase" +
+ ".pb.ModifyColumnRequest\032\036.hbase.pb.Modif",
+ "yColumnResponse\022G\n\nMoveRegion\022\033.hbase.pb" +
+ ".MoveRegionRequest\032\034.hbase.pb.MoveRegion" +
+ "Response\022k\n\026DispatchMergingRegions\022\'.hba" +
+ "se.pb.DispatchMergingRegionsRequest\032(.hb" +
+ "ase.pb.DispatchMergingRegionsResponse\022\\\n" +
+ "\021MergeTableRegions\022\".hbase.pb.MergeTable" +
+ "RegionsRequest\032#.hbase.pb.MergeTableRegi" +
+ "onsResponse\022M\n\014AssignRegion\022\035.hbase.pb.A" +
+ "ssignRegionRequest\032\036.hbase.pb.AssignRegi" +
+ "onResponse\022S\n\016UnassignRegion\022\037.hbase.pb.",
+ "UnassignRegionRequest\032 .hbase.pb.Unassig" +
+ "nRegionResponse\022P\n\rOfflineRegion\022\036.hbase" +
+ ".pb.OfflineRegionRequest\032\037.hbase.pb.Offl" +
+ "ineRegionResponse\022J\n\013DeleteTable\022\034.hbase" +
+ ".pb.DeleteTableRequest\032\035.hbase.pb.Delete" +
+ "TableResponse\022P\n\rtruncateTable\022\036.hbase.p" +
+ "b.TruncateTableRequest\032\037.hbase.pb.Trunca" +
+ "teTableResponse\022J\n\013EnableTable\022\034.hbase.p" +
+ "b.EnableTableRequest\032\035.hbase.pb.EnableTa" +
+ "bleResponse\022M\n\014DisableTable\022\035.hbase.pb.D",
+ "isableTableRequest\032\036.hbase.pb.DisableTab" +
+ "leResponse\022J\n\013ModifyTable\022\034.hbase.pb.Mod" +
+ "ifyTableRequest\032\035.hbase.pb.ModifyTableRe" +
+ "sponse\022J\n\013CreateTable\022\034.hbase.pb.CreateT" +
+ "ableRequest\032\035.hbase.pb.CreateTableRespon" +
+ "se\022A\n\010Shutdown\022\031.hbase.pb.ShutdownReques" +
+ "t\032\032.hbase.pb.ShutdownResponse\022G\n\nStopMas" +
+ "ter\022\033.hbase.pb.StopMasterRequest\032\034.hbase" +
+ ".pb.StopMasterResponse\022h\n\031IsMasterInMain" +
+ "tenanceMode\022$.hbase.pb.IsInMaintenanceMo",
+ "deRequest\032%.hbase.pb.IsInMaintenanceMode" +
+ "Response\022>\n\007Balance\022\030.hbase.pb.BalanceRe" +
+ "quest\032\031.hbase.pb.BalanceResponse\022_\n\022SetB" +
+ "alancerRunning\022#.hbase.pb.SetBalancerRun" +
+ "ningRequest\032$.hbase.pb.SetBalancerRunnin" +
+ "gResponse\022\\\n\021IsBalancerEnabled\022\".hbase.p" +
+ "b.IsBalancerEnabledRequest\032#.hbase.pb.Is" +
+ "BalancerEnabledResponse\022k\n\026SetSplitOrMer" +
+ "geEnabled\022\'.hbase.pb.SetSplitOrMergeEnab" +
+ "ledRequest\032(.hbase.pb.SetSplitOrMergeEna",
+ "bledResponse\022h\n\025IsSplitOrMergeEnabled\022&." +
+ "hbase.pb.IsSplitOrMergeEnabledRequest\032\'." +
+ "hbase.pb.IsSplitOrMergeEnabledResponse\022D" +
+ "\n\tNormalize\022\032.hbase.pb.NormalizeRequest\032" +
+ "\033.hbase.pb.NormalizeResponse\022e\n\024SetNorma" +
+ "lizerRunning\022%.hbase.pb.SetNormalizerRun" +
+ "ningRequest\032&.hbase.pb.SetNormalizerRunn" +
+ "ingResponse\022b\n\023IsNormalizerEnabled\022$.hba" +
+ "se.pb.IsNormalizerEnabledRequest\032%.hbase" +
+ ".pb.IsNormalizerEnabledResponse\022S\n\016RunCa",
+ "talogScan\022\037.hbase.pb.RunCatalogScanReque" +
+ "st\032 .hbase.pb.RunCatalogScanResponse\022e\n\024" +
+ "EnableCatalogJanitor\022%.hbase.pb.EnableCa" +
+ "talogJanitorRequest\032&.hbase.pb.EnableCat" +
+ "alogJanitorResponse\022n\n\027IsCatalogJanitorE" +
+ "nabled\022(.hbase.pb.IsCatalogJanitorEnable" +
+ "dRequest\032).hbase.pb.IsCatalogJanitorEnab" +
+ "ledResponse\022^\n\021ExecMasterService\022#.hbase" +
+ ".pb.CoprocessorServiceRequest\032$.hbase.pb" +
+ ".CoprocessorServiceResponse\022A\n\010Snapshot\022",
+ "\031.hbase.pb.SnapshotRequest\032\032.hbase.pb.Sn" +
+ "apshotResponse\022h\n\025GetCompletedSnapshots\022" +
+ "&.hbase.pb.GetCompletedSnapshotsRequest\032" +
+ "\'.hbase.pb.GetCompletedSnapshotsResponse" +
+ "\022S\n\016DeleteSnapshot\022\037.hbase.pb.DeleteSnap" +
+ "shotRequest\032 .hbase.pb.DeleteSnapshotRes" +
+ "ponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.IsSna" +
+ "pshotDoneRequest\032 .hbase.pb.IsSnapshotDo" +
+ "neResponse\022V\n\017RestoreSnapshot\022 .hbase.pb" +
+ ".RestoreSnapshotRequest\032!.hbase.pb.Resto",
+ "reSnapshotResponse\022P\n\rExecProcedure\022\036.hb" +
+ "ase.pb.ExecProcedureRequest\032\037.hbase.pb.E" +
+ "xecProcedureResponse\022W\n\024ExecProcedureWit" +
+ "hRet\022\036.hbase.pb.ExecProcedureRequest\032\037.h" +
+ "base.pb.ExecProcedureResponse\022V\n\017IsProce" +
+ "dureDone\022 .hbase.pb.IsProcedureDoneReque" +
+ "st\032!.hbase.pb.IsProcedureDoneResponse\022V\n" +
+ "\017ModifyNamespace\022 .hbase.pb.ModifyNamesp" +
+ "aceRequest\032!.hbase.pb.ModifyNamespaceRes" +
+ "ponse\022V\n\017CreateNamespace\022 .hbase.pb.Crea",
+ "teNamespaceRequest\032!.hbase.pb.CreateName" +
+ "spaceResponse\022V\n\017DeleteNamespace\022 .hbase" +
+ ".pb.DeleteNamespaceRequest\032!.hbase.pb.De" +
+ "leteNamespaceResponse\022k\n\026GetNamespaceDes" +
+ "criptor\022\'.hbase.pb.GetNamespaceDescripto" +
+ "rRequest\032(.hbase.pb.GetNamespaceDescript" +
+ "orResponse\022q\n\030ListNamespaceDescriptors\022)" +
+ ".hbase.pb.ListNamespaceDescriptorsReques" +
+ "t\032*.hbase.pb.ListNamespaceDescriptorsRes" +
+ "ponse\022\206\001\n\037ListTableDescriptorsByNamespac",
+ "e\0220.hbase.pb.ListTableDescriptorsByNames" +
+ "paceRequest\0321.hbase.pb.ListTableDescript" +
+ "orsByNamespaceResponse\022t\n\031ListTableNames" +
+ "ByNamespace\022*.hbase.pb.ListTableNamesByN" +
+ "amespaceRequest\032+.hbase.pb.ListTableName" +
+ "sByNamespaceResponse\022P\n\rGetTableState\022\036." +
+ "hbase.pb.GetTableStateRequest\032\037.hbase.pb" +
+ ".GetTableStateResponse\022A\n\010SetQuota\022\031.hba" +
+ "se.pb.SetQuotaRequest\032\032.hbase.pb.SetQuot" +
+ "aResponse\022x\n\037getLastMajorCompactionTimes",
+ "tamp\022).hbase.pb.MajorCompactionTimestamp" +
+ "Request\032*.hbase.pb.MajorCompactionTimest" +
+ "ampResponse\022\212\001\n(getLastMajorCompactionTi" +
+ "mestampForRegion\0222.hbase.pb.MajorCompact" +
+ "ionTimestampForRegionRequest\032*.hbase.pb." +
+ "MajorCompactionTimestampResponse\022_\n\022getP" +
+ "rocedureResult\022#.hbase.pb.GetProcedureRe" +
+ "sultRequest\032$.hbase.pb.GetProcedureResul" +
+ "tResponse\022h\n\027getSecurityCapabilities\022%.h" +
+ "base.pb.SecurityCapabilitiesRequest\032&.hb",
+ "ase.pb.SecurityCapabilitiesResponse\022S\n\016A" +
+ "bortProcedure\022\037.hbase.pb.AbortProcedureR" +
+ "equest\032 .hbase.pb.AbortProcedureResponse" +
+ "\022S\n\016ListProcedures\022\037.hbase.pb.ListProced" +
+ "uresRequest\032 .hbase.pb.ListProceduresRes" +
+ "ponse\022_\n\022AddReplicationPeer\022#.hbase.pb.A" +
+ "ddReplicationPeerRequest\032$.hbase.pb.AddR" +
+ "eplicationPeerResponse\022h\n\025RemoveReplicat" +
+ "ionPeer\022&.hbase.pb.RemoveReplicationPeer" +
+ "Request\032\'.hbase.pb.RemoveReplicationPeer",
+ "ResponseBI\n1org.apache.hadoop.hbase.shad" +
+ "ed.protobuf.generatedB\014MasterProtosH\001\210\001\001" +
+ "\240\001\001"
};
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
@@ -71341,6 +71505,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor(),
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor(),
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor(),
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.getDescriptor(),
}, assigner);
internal_static_hbase_pb_AddColumnRequest_descriptor =
getDescriptor().getMessageTypes().get(0);
@@ -72026,6 +72191,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor();
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor();
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor();
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
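
A minimal sketch of how the request messages introduced in the new ReplicationProtos.java
below are built. The AddReplicationPeerRequest / RemoveReplicationPeerRequest builders and
their setPeerId/setPeerConfig setters come from this patch; the ZooKeeperProtos.ReplicationPeer
setClusterkey(...) call, the peer id "1", and the quorum string are illustrative assumptions,
not part of the change.

    import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;

    public class ReplicationPeerRequestSketch {
      public static void main(String[] args) {
        // The peer config reuses the pre-existing ZooKeeperProtos.ReplicationPeer message;
        // setClusterkey(...) is assumed from that proto's "clusterkey" field.
        ZooKeeperProtos.ReplicationPeer peerConfig = ZooKeeperProtos.ReplicationPeer.newBuilder()
            .setClusterkey("zk1,zk2,zk3:2181:/hbase") // hypothetical slave cluster key
            .build();

        // peer_id and peer_config are both "required" in the proto, so build() throws
        // UninitializedMessageException if either is left unset.
        AddReplicationPeerRequest add = AddReplicationPeerRequest.newBuilder()
            .setPeerId("1") // hypothetical peer id
            .setPeerConfig(peerConfig)
            .build();

        // Removal only carries the peer id.
        RemoveReplicationPeerRequest remove = RemoveReplicationPeerRequest.newBuilder()
            .setPeerId("1")
            .build();

        System.out.println(add.getPeerId() + " / " + remove.getPeerId());
      }
    }
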
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
new file mode 100644
index 00000000000..c91796d116b
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
@@ -0,0 +1,2158 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: Replication.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class ReplicationProtos {
+ private ReplicationProtos() {}
+ public static void registerAllExtensions(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+ }
+
+ public static void registerAllExtensions(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+ registerAllExtensions(
+ (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+ }
+ public interface AddReplicationPeerRequestOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.AddReplicationPeerRequest)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * required string peer_id = 1;
+ */
+ boolean hasPeerId();
+ /**
+ * required string peer_id = 1;
+ */
+ java.lang.String getPeerId();
+ /**
+ * required string peer_id = 1;
+ */
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+ getPeerIdBytes();
+
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ boolean hasPeerConfig();
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig();
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.AddReplicationPeerRequest}
+ */
+ public static final class AddReplicationPeerRequest extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.AddReplicationPeerRequest)
+ AddReplicationPeerRequestOrBuilder {
+ // Use AddReplicationPeerRequest.newBuilder() to construct.
+ private AddReplicationPeerRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private AddReplicationPeerRequest() {
+ peerId_ = "";
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private AddReplicationPeerRequest(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ int mutable_bitField0_ = 0;
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+ bitField0_ |= 0x00000001;
+ peerId_ = bs;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = peerConfig_.toBuilder();
+ }
+ peerConfig_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(peerConfig_);
+ peerConfig_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ }
+ }
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.Builder.class);
+ }
+
+ private int bitField0_;
+ public static final int PEER_ID_FIELD_NUMBER = 1;
+ private volatile java.lang.Object peerId_;
+ /**
+ * required string peer_id = 1;
+ */
+ public boolean hasPeerId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string peer_id = 1;
+ */
+ public java.lang.String getPeerId() {
+ java.lang.Object ref = peerId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+ (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ peerId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string peer_id = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+ getPeerIdBytes() {
+ java.lang.Object ref = peerId_;
+ if (ref instanceof java.lang.String) {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ peerId_ = b;
+ return b;
+ } else {
+ return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int PEER_CONFIG_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer peerConfig_;
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public boolean hasPeerConfig() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig() {
+ return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder() {
+ return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ if (!hasPeerId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasPeerConfig()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getPeerConfig().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, getPeerConfig());
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, getPeerConfig());
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest) obj;
+
+ boolean result = true;
+ result = result && (hasPeerId() == other.hasPeerId());
+ if (hasPeerId()) {
+ result = result && getPeerId()
+ .equals(other.getPeerId());
+ }
+ result = result && (hasPeerConfig() == other.hasPeerConfig());
+ if (hasPeerConfig()) {
+ result = result && getPeerConfig()
+ .equals(other.getPeerConfig());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasPeerId()) {
+ hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getPeerId().hashCode();
+ }
+ if (hasPeerConfig()) {
+ hash = (37 * hash) + PEER_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getPeerConfig().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.AddReplicationPeerRequest}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:hbase.pb.AddReplicationPeerRequest)
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequestOrBuilder {
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .alwaysUseFieldBuilders) {
+ getPeerConfigFieldBuilder();
+ }
+ }
+ public Builder clear() {
+ super.clear();
+ peerId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (peerConfigBuilder_ == null) {
+ peerConfig_ = null;
+ } else {
+ peerConfigBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest build() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest buildPartial() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.peerId_ = peerId_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (peerConfigBuilder_ == null) {
+ result.peerConfig_ = peerConfig_;
+ } else {
+ result.peerConfig_ = peerConfigBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder clone() {
+ return (Builder) super.clone();
+ }
+ public Builder setField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.setField(field, value);
+ }
+ public Builder clearField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return (Builder) super.clearField(field);
+ }
+ public Builder clearOneof(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return (Builder) super.clearOneof(oneof);
+ }
+ public Builder setRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index, Object value) {
+ return (Builder) super.setRepeatedField(field, index, value);
+ }
+ public Builder addRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.addRepeatedField(field, value);
+ }
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest other) {
+ if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance()) return this;
+ if (other.hasPeerId()) {
+ bitField0_ |= 0x00000001;
+ peerId_ = other.peerId_;
+ onChanged();
+ }
+ if (other.hasPeerConfig()) {
+ mergePeerConfig(other.getPeerConfig());
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasPeerId()) {
+ return false;
+ }
+ if (!hasPeerConfig()) {
+ return false;
+ }
+ if (!getPeerConfig().isInitialized()) {
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private java.lang.Object peerId_ = "";
+ /**
+ * required string peer_id = 1;
+ */
+ public boolean hasPeerId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string peer_id = 1;
+ */
+ public java.lang.String getPeerId() {
+ java.lang.Object ref = peerId_;
+ if (!(ref instanceof java.lang.String)) {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+ (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ peerId_ = s;
+ }
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string peer_id = 1;
+ */
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+ getPeerIdBytes() {
+ java.lang.Object ref = peerId_;
+ if (ref instanceof String) {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ peerId_ = b;
+ return b;
+ } else {
+ return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string peer_id = 1;
+ */
+ public Builder setPeerId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ peerId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string peer_id = 1;
+ */
+ public Builder clearPeerId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ peerId_ = getDefaultInstance().getPeerId();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string peer_id = 1;
+ */
+ public Builder setPeerIdBytes(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ peerId_ = value;
+ onChanged();
+ return this;
+ }
+
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer peerConfig_ = null;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder> peerConfigBuilder_;
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public boolean hasPeerConfig() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig() {
+ if (peerConfigBuilder_ == null) {
+ return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+ } else {
+ return peerConfigBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public Builder setPeerConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer value) {
+ if (peerConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ peerConfig_ = value;
+ onChanged();
+ } else {
+ peerConfigBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public Builder setPeerConfig(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder builderForValue) {
+ if (peerConfigBuilder_ == null) {
+ peerConfig_ = builderForValue.build();
+ onChanged();
+ } else {
+ peerConfigBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public Builder mergePeerConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer value) {
+ if (peerConfigBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ peerConfig_ != null &&
+ peerConfig_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance()) {
+ peerConfig_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.newBuilder(peerConfig_).mergeFrom(value).buildPartial();
+ } else {
+ peerConfig_ = value;
+ }
+ onChanged();
+ } else {
+ peerConfigBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public Builder clearPeerConfig() {
+ if (peerConfigBuilder_ == null) {
+ peerConfig_ = null;
+ onChanged();
+ } else {
+ peerConfigBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder getPeerConfigBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getPeerConfigFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder() {
+ if (peerConfigBuilder_ != null) {
+ return peerConfigBuilder_.getMessageOrBuilder();
+ } else {
+ return peerConfig_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+ }
+ }
+ /**
+ * required .hbase.pb.ReplicationPeer peer_config = 2;
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder>
+ getPeerConfigFieldBuilder() {
+ if (peerConfigBuilder_ == null) {
+ peerConfigBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder>(
+ getPeerConfig(),
+ getParentForChildren(),
+ isClean());
+ peerConfig_ = null;
+ }
+ return peerConfigBuilder_;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.AddReplicationPeerRequest)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.AddReplicationPeerRequest)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerRequest>
+ PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<AddReplicationPeerRequest>() {
+ public AddReplicationPeerRequest parsePartialFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return new AddReplicationPeerRequest(input, extensionRegistry);
+ }
+ };
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerRequest> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerRequest> getParserForType() {
+ return PARSER;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+
+ }
+
+ public interface AddReplicationPeerResponseOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.AddReplicationPeerResponse)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code hbase.pb.AddReplicationPeerResponse}
+ */
+ public static final class AddReplicationPeerResponse extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.AddReplicationPeerResponse)
+ AddReplicationPeerResponseOrBuilder {
+ // Use AddReplicationPeerResponse.newBuilder() to construct.
+ private AddReplicationPeerResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private AddReplicationPeerResponse() {
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private AddReplicationPeerResponse(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.Builder.class);
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) obj;
+
+ boolean result = true;
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.AddReplicationPeerResponse}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:hbase.pb.AddReplicationPeerResponse)
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponseOrBuilder {
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .alwaysUseFieldBuilders) {
+ }
+ }
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse build() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse buildPartial() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder clone() {
+ return (Builder) super.clone();
+ }
+ public Builder setField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.setField(field, value);
+ }
+ public Builder clearField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return (Builder) super.clearField(field);
+ }
+ public Builder clearOneof(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return (Builder) super.clearOneof(oneof);
+ }
+ public Builder setRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index, Object value) {
+ return (Builder) super.setRepeatedField(field, index, value);
+ }
+ public Builder addRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.addRepeatedField(field, value);
+ }
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse other) {
+ if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.AddReplicationPeerResponse)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.AddReplicationPeerResponse)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerResponse>
+ PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<AddReplicationPeerResponse>() {
+ public AddReplicationPeerResponse parsePartialFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return new AddReplicationPeerResponse(input, extensionRegistry);
+ }
+ };
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerResponse> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerResponse> getParserForType() {
+ return PARSER;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+
+ }
+
+ public interface RemoveReplicationPeerRequestOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.RemoveReplicationPeerRequest)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ boolean hasPeerId();
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ java.lang.String getPeerId();
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+ getPeerIdBytes();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RemoveReplicationPeerRequest}
+ */
+ public static final class RemoveReplicationPeerRequest extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.RemoveReplicationPeerRequest)
+ RemoveReplicationPeerRequestOrBuilder {
+ // Use RemoveReplicationPeerRequest.newBuilder() to construct.
+ private RemoveReplicationPeerRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private RemoveReplicationPeerRequest() {
+ peerId_ = "";
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RemoveReplicationPeerRequest(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ int mutable_bitField0_ = 0;
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+ bitField0_ |= 0x00000001;
+ peerId_ = bs;
+ break;
+ }
+ }
+ }
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.Builder.class);
+ }
+
+ private int bitField0_;
+ public static final int PEER_ID_FIELD_NUMBER = 1;
+ private volatile java.lang.Object peerId_;
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public boolean hasPeerId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public java.lang.String getPeerId() {
+ java.lang.Object ref = peerId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+ (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ peerId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+ getPeerIdBytes() {
+ java.lang.Object ref = peerId_;
+ if (ref instanceof java.lang.String) {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ peerId_ = b;
+ return b;
+ } else {
+ return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ if (!hasPeerId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest) obj;
+
+ boolean result = true;
+ result = result && (hasPeerId() == other.hasPeerId());
+ if (hasPeerId()) {
+ result = result && getPeerId()
+ .equals(other.getPeerId());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasPeerId()) {
+ hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getPeerId().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RemoveReplicationPeerRequest}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:hbase.pb.RemoveReplicationPeerRequest)
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequestOrBuilder {
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .alwaysUseFieldBuilders) {
+ }
+ }
+ public Builder clear() {
+ super.clear();
+ peerId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest build() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest buildPartial() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.peerId_ = peerId_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder clone() {
+ return (Builder) super.clone();
+ }
+ public Builder setField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.setField(field, value);
+ }
+ public Builder clearField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return (Builder) super.clearField(field);
+ }
+ public Builder clearOneof(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return (Builder) super.clearOneof(oneof);
+ }
+ public Builder setRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index, Object value) {
+ return (Builder) super.setRepeatedField(field, index, value);
+ }
+ public Builder addRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.addRepeatedField(field, value);
+ }
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest other) {
+ if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance()) return this;
+ if (other.hasPeerId()) {
+ bitField0_ |= 0x00000001;
+ peerId_ = other.peerId_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasPeerId()) {
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private java.lang.Object peerId_ = "";
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public boolean hasPeerId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public java.lang.String getPeerId() {
+ java.lang.Object ref = peerId_;
+ if (!(ref instanceof java.lang.String)) {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+ (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ peerId_ = s;
+ }
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+ getPeerIdBytes() {
+ java.lang.Object ref = peerId_;
+ if (ref instanceof String) {
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ peerId_ = b;
+ return b;
+ } else {
+ return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public Builder setPeerId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ peerId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public Builder clearPeerId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ peerId_ = getDefaultInstance().getPeerId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string peer_id = 1;</code>
+ */
+ public Builder setPeerIdBytes(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ peerId_ = value;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveReplicationPeerRequest)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.RemoveReplicationPeerRequest)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerRequest>
+ PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<RemoveReplicationPeerRequest>() {
+ public RemoveReplicationPeerRequest parsePartialFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return new RemoveReplicationPeerRequest(input, extensionRegistry);
+ }
+ };
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerRequest> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerRequest> getParserForType() {
+ return PARSER;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+
+ }
+
+ public interface RemoveReplicationPeerResponseOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.RemoveReplicationPeerResponse)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RemoveReplicationPeerResponse}
+ */
+ public static final class RemoveReplicationPeerResponse extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.RemoveReplicationPeerResponse)
+ RemoveReplicationPeerResponseOrBuilder {
+ // Use RemoveReplicationPeerResponse.newBuilder() to construct.
+ private RemoveReplicationPeerResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private RemoveReplicationPeerResponse() {
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RemoveReplicationPeerResponse(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.Builder.class);
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) obj;
+
+ boolean result = true;
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RemoveReplicationPeerResponse}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:hbase.pb.RemoveReplicationPeerResponse)
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponseOrBuilder {
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .alwaysUseFieldBuilders) {
+ }
+ }
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse build() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse buildPartial() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder clone() {
+ return (Builder) super.clone();
+ }
+ public Builder setField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.setField(field, value);
+ }
+ public Builder clearField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return (Builder) super.clearField(field);
+ }
+ public Builder clearOneof(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return (Builder) super.clearOneof(oneof);
+ }
+ public Builder setRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index, Object value) {
+ return (Builder) super.setRepeatedField(field, index, value);
+ }
+ public Builder addRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.addRepeatedField(field, value);
+ }
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse other) {
+ if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveReplicationPeerResponse)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.RemoveReplicationPeerResponse)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerResponse>
+ PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<RemoveReplicationPeerResponse>() {
+ public RemoveReplicationPeerResponse parsePartialFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return new RemoveReplicationPeerResponse(input, extensionRegistry);
+ }
+ };
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerResponse> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerResponse> getParserForType() {
+ return PARSER;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+
+ }
+
+ private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_AddReplicationPeerRequest_descriptor;
+ private static final
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_hbase_pb_AddReplicationPeerRequest_fieldAccessorTable;
+ private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_AddReplicationPeerResponse_descriptor;
+ private static final
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_hbase_pb_AddReplicationPeerResponse_fieldAccessorTable;
+ private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor;
+ private static final
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_hbase_pb_RemoveReplicationPeerRequest_fieldAccessorTable;
+ private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor;
+ private static final
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_hbase_pb_RemoveReplicationPeerResponse_fieldAccessorTable;
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\021Replication.proto\022\010hbase.pb\032\017ZooKeeper" +
+ ".proto\"\\\n\031AddReplicationPeerRequest\022\017\n\007p" +
+ "eer_id\030\001 \002(\t\022.\n\013peer_config\030\002 \002(\0132\031.hbas" +
+ "e.pb.ReplicationPeer\"\034\n\032AddReplicationPe" +
+ "erResponse\"/\n\034RemoveReplicationPeerReque" +
+ "st\022\017\n\007peer_id\030\001 \002(\t\"\037\n\035RemoveReplication" +
+ "PeerResponseBN\n1org.apache.hadoop.hbase." +
+ "shaded.protobuf.generatedB\021ReplicationPr" +
+ "otosH\001\210\001\001\240\001\001"
+ };
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ return null;
+ }
+ };
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.getDescriptor(),
+ }, assigner);
+ internal_static_hbase_pb_AddReplicationPeerRequest_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_hbase_pb_AddReplicationPeerRequest_fieldAccessorTable = new
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_hbase_pb_AddReplicationPeerRequest_descriptor,
+ new java.lang.String[] { "PeerId", "PeerConfig", });
+ internal_static_hbase_pb_AddReplicationPeerResponse_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_hbase_pb_AddReplicationPeerResponse_fieldAccessorTable = new
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_hbase_pb_AddReplicationPeerResponse_descriptor,
+ new java.lang.String[] { });
+ internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_hbase_pb_RemoveReplicationPeerRequest_fieldAccessorTable = new
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor,
+ new java.lang.String[] { "PeerId", });
+ internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_hbase_pb_RemoveReplicationPeerResponse_fieldAccessorTable = new
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor,
+ new java.lang.String[] { });
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.getDescriptor();
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
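
The generated messages above expose the standard protobuf surface: a newBuilder()/build() pair that enforces required fields (here peer_id), and static parseFrom overloads backed by PARSER for wire-format decoding. A minimal round-trip sketch against that generated API, assuming only that the shaded classes above are on the classpath:

```java
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;

public class RemovePeerRequestRoundTrip {
  public static void main(String[] args) throws Exception {
    // build() throws UninitializedMessageException if the required peer_id is unset
    RemoveReplicationPeerRequest request = RemoveReplicationPeerRequest.newBuilder()
        .setPeerId("1")
        .build();

    // Serialize to the protobuf wire format and parse it back
    byte[] wire = request.toByteArray();
    RemoveReplicationPeerRequest parsed = RemoveReplicationPeerRequest.parseFrom(wire);

    System.out.println(parsed.hasPeerId() + " " + parsed.getPeerId()); // true 1
  }
}
```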
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index b283ed92e42..384ac674572 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -32,6 +32,7 @@ import "ClusterStatus.proto";
import "ErrorHandling.proto";
import "Procedure.proto";
import "Quota.proto";
+import "Replication.proto";
/* Column-level protobufs */
@@ -846,4 +847,12 @@ service MasterService {
/** returns a list of procedures */
rpc ListProcedures(ListProceduresRequest)
returns(ListProceduresResponse);
+
+ /** Add a replication peer */
+ rpc AddReplicationPeer(AddReplicationPeerRequest)
+ returns(AddReplicationPeerResponse);
+
+ /** Remove a replication peer */
+ rpc RemoveReplicationPeer(RemoveReplicationPeerRequest)
+ returns(RemoveReplicationPeerResponse);
}
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
new file mode 100644
index 00000000000..0bdf2c05ec8
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -0,0 +1,42 @@
+ /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "ReplicationProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "ZooKeeper.proto";
+
+message AddReplicationPeerRequest {
+ required string peer_id = 1;
+ required ReplicationPeer peer_config = 2;
+}
+
+message AddReplicationPeerResponse {
+}
+
+message RemoveReplicationPeerRequest {
+ required string peer_id = 1;
+}
+
+message RemoveReplicationPeerResponse {
+}
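
The new Replication.proto reuses the existing ReplicationPeer message from ZooKeeper.proto as the peer_config payload. A hedged construction sketch, assuming ReplicationPeer carries the slave cluster key in a clusterkey field as in the current ZooKeeper.proto definition:

```java
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;

public class AddPeerRequestExample {
  public static void main(String[] args) {
    // Assumption: ReplicationPeer exposes the slave cluster key
    // (quorum:clientPort:znodeParent) via a "clusterkey" field.
    ZooKeeperProtos.ReplicationPeer peer = ZooKeeperProtos.ReplicationPeer.newBuilder()
        .setClusterkey("slave-zk1,slave-zk2,slave-zk3:2181:/hbase")
        .build();

    // Both peer_id and peer_config are required, so build() validates them.
    AddReplicationPeerRequest request = AddReplicationPeerRequest.newBuilder()
        .setPeerId("1")
        .setPeerConfig(peer)
        .build();
    System.out.println(request);
  }
}
```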
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 9abcd5202bf..5067b3b87bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
@@ -1827,4 +1828,45 @@ public interface MasterObserver extends Coprocessor {
void postBalanceRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, boolean balancerRan) throws IOException;
+ /**
+ * Called before adding a replication peer
+ * @param ctx the environment to interact with the framework and master
+ * @param peerId a short name that identifies the peer
+ * @param peerConfig configuration for the replication peer
+ * @throws IOException on failure
+ */
+ default void preAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ String peerId, ReplicationPeerConfig peerConfig) throws IOException {
+ }
+
+ /**
+ * Called after adding a replication peer
+ * @param ctx the environment to interact with the framework and master
+ * @param peerId a short name that identifies the peer
+ * @param peerConfig configuration for the replication peer
+ * @throws IOException on failure
+ */
+ default void postAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ String peerId, ReplicationPeerConfig peerConfig) throws IOException {
+ }
+
+ /**
+ * Called before removing a replication peer
+ * @param ctx the environment to interact with the framework and master
+ * @param peerId a short name that identifies the peer
+ * @throws IOException on failure
+ */
+ default void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ String peerId) throws IOException {
+ }
+
+ /**
+ * Called after removing a replication peer
+ * @param ctx the environment to interact with the framework and master
+ * @param peerId a short name that identifies the peer
+ * @throws IOException on failure
+ */
+ default void postRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ String peerId) throws IOException {
+ }
}
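
Because the new hooks are default methods, an observer only overrides the hooks it needs. A hypothetical policy coprocessor, sketched on the assumption that it extends the stock no-op BaseMasterObserver; the class name and validation rule are illustrative:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

// Hypothetical observer: vetoes peer ids that are not purely alphanumeric.
public class PeerIdPolicyObserver extends BaseMasterObserver {
  @Override
  public void preAddReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
      String peerId, ReplicationPeerConfig peerConfig) throws IOException {
    if (!peerId.matches("[A-Za-z0-9]+")) {
      // Throwing here aborts the add before the master touches its replication state
      throw new IOException("Illegal replication peer id: " + peerId);
    }
  }
}
```

Such an observer would be wired in through the usual hbase.coprocessor.master.classes property in hbase-site.xml.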
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c5c246baf76..da35da15e9a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -119,6 +119,7 @@ import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
+import org.apache.hadoop.hbase.master.replication.ReplicationManager;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
@@ -138,7 +139,12 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
+import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -291,6 +297,9 @@ public class HMaster extends HRegionServer implements MasterServices {
// manager of assignment nodes in zookeeper
private AssignmentManager assignmentManager;
+ // manager of replication
+ private ReplicationManager replicationManager;
+
// buffer for "fatal error" notices from region servers
// in the cluster. This is only used for assisting
// operations/debugging.
@@ -640,6 +649,8 @@ public class HMaster extends HRegionServer implements MasterServices {
this.balancer, this.service, this.metricsMaster,
this.tableLockManager, tableStateManager);
+ this.replicationManager = new ReplicationManager(conf, zooKeeper, this);
+
this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);
this.regionServerTracker.start();
@@ -3135,4 +3146,30 @@ public class HMaster extends HRegionServer implements MasterServices {
public FavoredNodesManager getFavoredNodesManager() {
return favoredNodesManager;
}
+
+ @Override
+ public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+ throws ReplicationException, IOException {
+ if (cpHost != null) {
+ cpHost.preAddReplicationPeer(peerId, peerConfig);
+ }
+ LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config="
+ + peerConfig);
+ this.replicationManager.addReplicationPeer(peerId, peerConfig);
+ if (cpHost != null) {
+ cpHost.postAddReplicationPeer(peerId, peerConfig);
+ }
+ }
+
+ @Override
+ public void removeReplicationPeer(String peerId) throws ReplicationException, IOException {
+ if (cpHost != null) {
+ cpHost.preRemoveReplicationPeer(peerId);
+ }
+ LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId);
+ this.replicationManager.removeReplicationPeer(peerId);
+ if (cpHost != null) {
+ cpHost.postRemoveReplicationPeer(peerId);
+ }
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index a18068d34f8..97fbe67fd7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.security.User;
@@ -1645,4 +1646,45 @@ public class MasterCoprocessorHost
});
}
+ public void preAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
+ throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {
+ observer.preAddReplicationPeer(ctx, peerId, peerConfig);
+ }
+ });
+ }
+
+ public void postAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
+ throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {
+ observer.postAddReplicationPeer(ctx, peerId, peerConfig);
+ }
+ });
+ }
+
+ public void preRemoveReplicationPeer(final String peerId) throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {
+ observer.preRemoveReplicationPeer(ctx, peerId);
+ }
+ });
+ }
+
+ public void postRemoveReplicationPeer(final String peerId) throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {
+ observer.postRemoveReplicationPeer(ctx, peerId);
+ }
+ });
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 299007660eb..afd807c5f5d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
@@ -86,7 +87,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
@@ -1638,4 +1644,27 @@ public class MasterRpcServices extends RSRpcServices
}
return null;
}
+
+ @Override
+ public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
+ AddReplicationPeerRequest request) throws ServiceException {
+ try {
+ master.addReplicationPeer(request.getPeerId(),
+ ReplicationSerDeHelper.convert(request.getPeerConfig()));
+ return AddReplicationPeerResponse.newBuilder().build();
+ } catch (ReplicationException | IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
+ RemoveReplicationPeerRequest request) throws ServiceException {
+ try {
+ master.removeReplicationPeer(request.getPeerId());
+ return RemoveReplicationPeerResponse.newBuilder().build();
+ } catch (ReplicationException | IOException e) {
+ throw new ServiceException(e);
+ }
+ }
}
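
These two RPC endpoints back the new Admin methods end to end. A minimal client-side sketch of the resulting call path, assuming a standard connection setup; the peer id and cluster key below are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ReplicationPeerAdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Placeholder cluster key: the slave cluster's zk quorum,
      // client port and znode parent.
      ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
      peerConfig.setClusterKey("zk1,zk2,zk3:2181:/hbase");
      admin.addReplicationPeer("1", peerConfig);
      // ... later, stop replicating to that peer.
      admin.removeReplicationPeer("1");
    }
  }
}

Unlike the older ReplicationAdmin, which talked to ZooKeeper directly from the client, these calls go through the master, which is what makes the coprocessor hooks and ACL checks in this patch possible.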
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 78451013c4a..5fc9d1693ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import com.google.protobuf.Service;
@@ -415,4 +417,18 @@ public interface MasterServices extends Server {
* @return Favored Nodes Manager
*/
public FavoredNodesManager getFavoredNodesManager();
+
+ /**
+ * Add a new replication peer for replicating data to slave cluster
+ * @param peerId a short name that identifies the peer
+ * @param peerConfig configuration for the replication slave cluster
+ */
+ void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+ throws ReplicationException, IOException;
+
+ /**
+ * Removes a peer and stops the replication
+ * @param peerId a short name that identifies the peer
+ */
+ void removeReplicationPeer(String peerId) throws ReplicationException, IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
new file mode 100644
index 00000000000..748f7af64d9
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationFactory;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+
+/**
+ * Manages and performs all replication admin operations.
+ * Used to add/remove a replication peer.
+ */
+@InterfaceAudience.Private
+public class ReplicationManager {
+
+ private final Configuration conf;
+ private final ZooKeeperWatcher zkw;
+ private final ReplicationQueuesClient replicationQueuesClient;
+ private final ReplicationPeers replicationPeers;
+
+ public ReplicationManager(Configuration conf, ZooKeeperWatcher zkw, Abortable abortable)
+ throws IOException {
+ this.conf = conf;
+ this.zkw = zkw;
+ try {
+ this.replicationQueuesClient = ReplicationFactory
+ .getReplicationQueuesClient(new ReplicationQueuesClientArguments(conf, abortable, zkw));
+ this.replicationQueuesClient.init();
+ this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
+ this.replicationQueuesClient, abortable);
+ this.replicationPeers.init();
+ } catch (Exception e) {
+ throw new IOException("Failed to construct ReplicationManager", e);
+ }
+ }
+
+ public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+ throws ReplicationException {
+ checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
+ peerConfig.getTableCFsMap());
+ this.replicationPeers.registerPeer(peerId, peerConfig);
+ }
+
+ public void removeReplicationPeer(String peerId) throws ReplicationException {
+ this.replicationPeers.unregisterPeer(peerId);
+ }
+
+ /**
+ * Setting a namespace in the peer config means that all tables in this namespace
+ * will be replicated to the peer cluster.
+ *
+ * 1. If a namespace is already set in the peer config, then no table of this
+ * namespace can be added to the peer config.
+ * 2. If a table is already set in the peer config, then this table's namespace
+ * cannot be added to the peer config.
+ *
+ * @param namespaces the namespaces set in the peer config
+ * @param tableCfs the table-cfs map set in the peer config
+ * @throws ReplicationException if the namespaces config conflicts with the table-cfs config
+ */
+ private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces,
+ Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
+ if (namespaces == null || namespaces.isEmpty()) {
+ return;
+ }
+ if (tableCfs == null || tableCfs.isEmpty()) {
+ return;
+ }
+ for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
+ TableName table = entry.getKey();
+ if (namespaces.contains(table.getNamespaceAsString())) {
+ throw new ReplicationException(
+ "Table-cfs config conflict with namespaces config in peer");
+ }
+ }
+ }
+}
\ No newline at end of file
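
To make the conflict rule concrete, a sketch of a peer config that checkNamespacesAndTableCfsConfigConflict would reject; the namespace, table, and cluster key are illustrative, and it assumes the ReplicationPeerConfig setters for namespaces and table-cfs from the namespace-replication support:

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerConfigConflictExample {
  public static void main(String[] args) {
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1:2181:/hbase");

    // Replicate every table in namespace ns1 ...
    Set<String> namespaces = new HashSet<>();
    namespaces.add("ns1");
    peerConfig.setNamespaces(namespaces);

    // ... and also list an explicit table inside ns1. This is exactly the
    // combination the new check rejects, so addReplicationPeer would fail
    // with a ReplicationException.
    Map<TableName, List<String>> tableCfs = new HashMap<>();
    tableCfs.put(TableName.valueOf("ns1:t1"), Arrays.asList("cf1"));
    peerConfig.setTableCFsMap(tableCfs);
  }
}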
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index d9afbc8b6bc..04528830748 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
@@ -2695,4 +2696,16 @@ public class AccessController extends BaseMasterAndRegionObserver
String groupName) throws IOException {
requirePermission(getActiveUser(ctx), "balanceRSGroup", Action.ADMIN);
}
+
+ @Override
+ public void preAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ String peerId, ReplicationPeerConfig peerConfig) throws IOException {
+ requirePermission(getActiveUser(ctx), "addReplicationPeer", Action.ADMIN);
+ }
+
+ @Override
+ public void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ String peerId) throws IOException {
+ requirePermission(getActiveUser(ctx), "removeReplicationPeer", Action.ADMIN);
+ }
}
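
Both hooks require global ADMIN, so an operator must hold or be granted that permission before managing peers. A sketch of granting it programmatically, assuming security is enabled; the user name is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantReplicationAdmin {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Global ADMIN is the permission the new pre-hooks check for
      // addReplicationPeer and removeReplicationPeer.
      AccessControlClient.grant(conn, "replication_operator", Permission.Action.ADMIN);
    }
  }
}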
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 7363fb97785..10c73a64a83 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeer;
@@ -76,8 +77,9 @@ public class TestReplicationAdmin {
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.startMiniZKCluster();
+ TEST_UTIL.startMiniCluster();
Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
admin = new ReplicationAdmin(conf);
}
@@ -86,7 +88,7 @@ public class TestReplicationAdmin {
if (admin != null) {
admin.close();
}
- TEST_UTIL.shutdownMiniZKCluster();
+ TEST_UTIL.shutdownMiniCluster();
}

/**
@@ -105,7 +107,7 @@ public class TestReplicationAdmin {
// try adding the same (fails)
try {
admin.addPeer(ID_ONE, rpc1, null);
- } catch (IllegalArgumentException iae) {
+ } catch (Exception e) {
// OK!
}
assertEquals(1, admin.getPeersCount());
@@ -113,14 +115,14 @@ public class TestReplicationAdmin {
try {
admin.removePeer(ID_SECOND);
fail();
- } catch (IllegalArgumentException iae) {
+ } catch (Exception e) {
// OK!
}
assertEquals(1, admin.getPeersCount());
// Add a second since multi-slave is supported
try {
admin.addPeer(ID_SECOND, rpc2, null);
- } catch (IllegalStateException iae) {
+ } catch (Exception e) {
fail();
}
assertEquals(2, admin.getPeersCount());
@@ -170,7 +172,7 @@ public class TestReplicationAdmin {
try {
admin.addPeer(ID_ONE, rpc1, null);
fail();
- } catch (ReplicationException e) {
+ } catch (Exception e) {
// OK!
}
repQueues.removeQueue(ID_ONE);
@@ -181,7 +183,7 @@ public class TestReplicationAdmin {
try {
admin.addPeer(ID_ONE, rpc2, null);
fail();
- } catch (ReplicationException e) {
+ } catch (Exception e) {
// OK!
}
repQueues.removeAllQueues();
@@ -422,7 +424,7 @@ public class TestReplicationAdmin {
}

@Test
- public void testNamespacesAndTableCfsConfigConflict() throws ReplicationException {
+ public void testNamespacesAndTableCfsConfigConflict() throws Exception {
String ns1 = "ns1";
String ns2 = "ns2";
TableName tab1 = TableName.valueOf("ns1:tabl");
@@ -471,7 +473,7 @@ public class TestReplicationAdmin {
}

@Test
- public void testPeerBandwidth() throws ReplicationException {
+ public void testPeerBandwidth() throws Exception {
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(KEY_ONE);
admin.addPeer(ID_ONE, rpc);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index aec40579e05..55138a0c5cf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -380,4 +382,13 @@ public class MockNoopMasterServices implements MasterServices, Server {
public MasterProcedureManagerHost getMasterProcedureManagerHost() {
return null;
}
+
+ @Override
+ public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+ throws ReplicationException {
+ }
+
+ @Override
+ public void removeReplicationPeer(String peerId) throws ReplicationException {
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index 7f2fb08527a..2e83c56faa3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -275,8 +276,8 @@ public class TestMasterNoCluster {
void initClusterSchemaService() throws IOException, InterruptedException {}

@Override
- void initializeZKBasedSystemTrackers() throws IOException,
- InterruptedException, KeeperException, CoordinatedStateException {
+ void initializeZKBasedSystemTrackers() throws IOException, InterruptedException,
+ KeeperException, CoordinatedStateException {
super.initializeZKBasedSystemTrackers();
// Record a newer server in server manager at first
getServerManager().recordNewServerWithLock(newServer, ServerLoad.EMPTY_SERVERLOAD);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index b3739fb2058..474039b91b7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -123,18 +123,18 @@ public class TestReplicationBase {
utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK);
zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
-
- ReplicationPeerConfig rpc = new ReplicationPeerConfig();
- rpc.setClusterKey(utility2.getClusterKey());
- admin.addPeer("2", rpc, null);
-
LOG.info("Setup second Zk");
+
CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
utility1.startMiniCluster(2);
// Have a bunch of slave servers, because inter-cluster shipping logic uses number of sinks
// as a component in deciding maximum number of parallel batches to send to the peer cluster.
utility2.startMiniCluster(4);
+ ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+ rpc.setClusterKey(utility2.getClusterKey());
+ admin.addPeer("2", rpc, null);
+
HTableDescriptor table = new HTableDescriptor(tableName);
HColumnDescriptor fam = new HColumnDescriptor(famName);
fam.setMaxVersions(100);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index af0e3577111..a680f704c7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -130,14 +130,14 @@ public class TestReplicationWithTags {
utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK);
- ReplicationPeerConfig rpc = new ReplicationPeerConfig();
- rpc.setClusterKey(utility2.getClusterKey());
- replicationAdmin.addPeer("2", rpc, null);
-
LOG.info("Setup second Zk");
utility1.startMiniCluster(2);
utility2.startMiniCluster(2);
+ ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+ rpc.setClusterKey(utility2.getClusterKey());
+ replicationAdmin.addPeer("2", rpc, null);
+
HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
fam.setMaxVersions(3);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
index 6fcccafd7ac..c9f43198920 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
@@ -106,14 +106,14 @@ public class TestSerialReplication {
utility2.setZkCluster(miniZK);
new ZooKeeperWatcher(conf2, "cluster2", null, true);
+ utility1.startMiniCluster(1, 10);
+ utility2.startMiniCluster(1, 1);
+
ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
admin1.addPeer("1", rpc, null);
- utility1.startMiniCluster(1, 10);
- utility2.startMiniCluster(1, 1);
-
utility1.getHBaseAdmin().setBalancerRunning(false, true);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 33ff094d53e..a0f6f29086b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2870,4 +2870,34 @@ public class TestAccessController extends SecureTestUtil {
verifyAllowed(action1, SUPERUSER, USER_ADMIN);
verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
+
+ @Test
+ public void testAddReplicationPeer() throws Exception {
+ AccessTestAction action = new AccessTestAction() {
+ @Override
+ public Object run() throws Exception {
+ ACCESS_CONTROLLER.preAddReplicationPeer(ObserverContext.createAndPrepare(CP_ENV, null),
+ "test", null);
+ return null;
+ }
+ };
+
+ verifyAllowed(action, SUPERUSER, USER_ADMIN);
+ verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+ }
+
+ @Test
+ public void testRemoveReplicationPeer() throws Exception {
+ AccessTestAction action = new AccessTestAction() {
+ @Override
+ public Object run() throws Exception {
+ ACCESS_CONTROLLER.preRemoveReplicationPeer(ObserverContext.createAndPrepare(CP_ENV, null),
+ "test");
+ return null;
+ }
+ };
+
+ verifyAllowed(action, SUPERUSER, USER_ADMIN);
+ verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
index fafa500e6a2..56a7260a0bd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
@@ -128,14 +128,16 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
TEST_UTIL1 = new HBaseTestingUtility(conf1);
TEST_UTIL1.setZkCluster(miniZK);
zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true);
- ReplicationPeerConfig rpc = new ReplicationPeerConfig();
- rpc.setClusterKey(TEST_UTIL1.getClusterKey());
- replicationAdmin.addPeer("2", rpc, null);
TEST_UTIL.startMiniCluster(1);
// Wait for the labels table to become available
TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
TEST_UTIL1.startMiniCluster(1);
+
+ ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+ rpc.setClusterKey(TEST_UTIL1.getClusterKey());
+ replicationAdmin.addPeer("2", rpc, null);
+
HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor desc = new HColumnDescriptor(fam);
desc.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
index a62a28194e0..31b74fb7ab8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
@@ -177,14 +177,16 @@ public class TestVisibilityLabelsReplication {
TEST_UTIL1 = new HBaseTestingUtility(conf1);
TEST_UTIL1.setZkCluster(miniZK);
zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true);
- ReplicationPeerConfig rpc = new ReplicationPeerConfig();
- rpc.setClusterKey(TEST_UTIL1.getClusterKey());
- replicationAdmin.addPeer("2", rpc, null);
TEST_UTIL.startMiniCluster(1);
// Wait for the labels table to become available
TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
TEST_UTIL1.startMiniCluster(1);
+
+ ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+ rpc.setClusterKey(TEST_UTIL1.getClusterKey());
+ replicationAdmin.addPeer("2", rpc, null);
+
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor desc = new HColumnDescriptor(fam);
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index e222875494d..a71d9164c92 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -116,6 +116,8 @@ In case the table goes out of date, the unit tests which check for accuracy of p
| | setUserQuota(Table level) | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
| | setTableQuota | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
| | setNamespaceQuota | superuser\|global(A)
+| | addReplicationPeer | superuser\|global(A)
+| | removeReplicationPeer | superuser\|global(A)
| Region | openRegion | superuser\|global(A)
| | closeRegion | superuser\|global(A)
| | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)