diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 2a1b782065d..4039fe3dd5b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Future;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -1879,6 +1881,35 @@ public interface Admin extends Abortable, Closeable {
       final ReplicationPeerConfig peerConfig) throws IOException {
   }
 
+  /**
+   * Return a list of replication peers.
+   * @return a list of replication peers description
+   * @throws IOException if a remote or network exception occurs
+   */
+  default List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
+    return new ArrayList<>();
+  }
+
+  /**
+   * Return a list of replication peers.
+   * @param regex The regular expression to match peer id
+   * @return a list of replication peers description
+   * @throws IOException if a remote or network exception occurs
+   */
+  default List<ReplicationPeerDescription> listReplicationPeers(String regex) throws IOException {
+    return new ArrayList<>();
+  }
+
+  /**
+   * Return a list of replication peers.
+   * @param pattern The compiled regular expression to match peer id
+   * @return a list of replication peers description
+   * @throws IOException if a remote or network exception occurs
+   */
+  default List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException {
+    return new ArrayList<>();
+  }
+
   /**
    * Mark a region server as draining to prevent additional regions from getting assigned to it.
    * @param servers List of region servers to drain.
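For orientation, a minimal client-side sketch of how the new Admin methods are intended to be called; the configuration/connection boilerplate and the "peer_" id prefix are illustrative, not part of this patch:

    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // List every replication peer known to the master.
      for (ReplicationPeerDescription peer : admin.listReplicationPeers()) {
        System.out.println(peer.getPeerId() + " enabled=" + peer.isEnabled());
      }
      // Filter peer ids server-side; the String and no-arg overloads both
      // funnel into the Pattern overload, with null meaning "no filter".
      List<ReplicationPeerDescription> filtered =
          admin.listReplicationPeers(Pattern.compile("peer_.*"));
    }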
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index ea11c25a413..cfed9f6a472 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -102,6 +102,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Enabl
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
@@ -1711,6 +1713,12 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
           throws ServiceException {
         return stub.updateReplicationPeerConfig(controller, request);
       }
+
+      @Override
+      public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
+          ListReplicationPeersRequest request) throws ServiceException {
+        return stub.listReplicationPeers(controller, request);
+      }
     };
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 3c849295625..db9cea53d63 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -172,6 +173,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
@@ -3832,6 +3834,35 @@ public class HBaseAdmin implements Admin {
     });
   }
 
+  @Override
+  public List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
+    return listReplicationPeers((Pattern) null);
+  }
+
+  @Override
+  public List<ReplicationPeerDescription> listReplicationPeers(String regex) throws IOException {
+    return listReplicationPeers(Pattern.compile(regex));
+  }
+
+  @Override
+  public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
+      throws IOException {
+    return executeCallable(new MasterCallable<List<ReplicationPeerDescription>>(getConnection(),
+        getRpcControllerFactory()) {
+      @Override
+      protected List<ReplicationPeerDescription> rpcCall() throws Exception {
+        List<ReplicationProtos.ReplicationPeerDescription> peersList = master.listReplicationPeers(
+          getRpcController(), RequestConverter.buildListReplicationPeersRequest(pattern))
+            .getPeerDescList();
+        List<ReplicationPeerDescription> result = new ArrayList<>(peersList.size());
+        for (ReplicationProtos.ReplicationPeerDescription peer : peersList) {
+          result.add(ReplicationSerDeHelper.toReplicationPeerDescription(peer));
+        }
+        return result;
+      }
+    });
+  }
+
   @Override
   public void drainRegionServers(List<ServerName> servers) throws IOException {
     final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index d0859a44542..f9ca443b3e6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -29,6 +29,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 import java.util.Map.Entry;
 import java.util.Set;
 
@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -52,6 +54,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
@@ -238,13 +241,19 @@ public class ReplicationAdmin implements Closeable {
   /**
    * Get the number of slave clusters the local cluster has.
    * @return number of slave clusters
+   * @throws IOException if a remote or network exception occurs
    */
-  public int getPeersCount() {
-    return this.replicationPeers.getAllPeerIds().size();
+  public int getPeersCount() throws IOException {
+    return this.admin.listReplicationPeers().size();
   }
 
-  public Map<String, ReplicationPeerConfig> listPeerConfigs() {
-    return this.replicationPeers.getAllPeerConfigs();
+  public Map<String, ReplicationPeerConfig> listPeerConfigs() throws IOException {
+    List<ReplicationPeerDescription> peers = this.admin.listReplicationPeers();
+    Map<String, ReplicationPeerConfig> result = new TreeMap<String, ReplicationPeerConfig>();
+    for (ReplicationPeerDescription peer : peers) {
+      result.put(peer.getPeerId(), peer.getPeerConfig());
+    }
+    return result;
   }
 
   public ReplicationPeerConfig getPeerConfig(String id) throws IOException {
@@ -402,8 +411,12 @@ public class ReplicationAdmin implements Closeable {
    * an IllegalArgumentException is thrown if it doesn't exist
    * @return true if replication is enabled to that peer, false if it isn't
    */
-  public boolean getPeerState(String id) throws ReplicationException {
-    return this.replicationPeers.getStatusOfPeerFromBackingStore(id);
+  public boolean getPeerState(String id) throws ReplicationException, IOException {
+    List<ReplicationPeerDescription> peers = admin.listReplicationPeers(id);
+    if (peers.isEmpty() || !id.equals(peers.get(0).getPeerId())) {
+      throw new ReplicationPeerNotFoundException(id);
+    }
+    return peers.get(0).isEnabled();
   }
 
   @Override
@@ -577,7 +590,7 @@ public class ReplicationAdmin implements Closeable {
   }
 
   @VisibleForTesting
-  List<ReplicationPeer> listReplicationPeers() {
+  List<ReplicationPeer> listReplicationPeers() throws IOException {
     Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
     if (peers == null || peers.size() <= 0) {
      return null;
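Note the changed failure mode of getPeerState() above: the peer id is handed to Admin.listReplicationPeers(String) as a regular expression and the first result is checked for an exact id match, so an unknown peer now surfaces as a ReplicationPeerNotFoundException instead of an error from the ZooKeeper-backed store. A hedged sketch ("peer_1" is a made-up id):

    ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
    try {
      // Ships "peer_1" as a regex, then verifies the first hit has the exact id.
      boolean enabled = replicationAdmin.getPeerState("peer_1");
      System.out.println("peer_1 enabled: " + enabled);
    } catch (ReplicationPeerNotFoundException e) {
      // Thrown when listReplicationPeers("peer_1") returns no exact-id match.
      System.out.println("no such peer: " + e.getMessage());
    }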
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
index 9e04c9bb057..93eea17cfbf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;
 
@@ -293,7 +294,7 @@ public final class ReplicationSerDeHelper {
     return peerConfig;
   }
 
-  public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig peerConfig) { 
+  public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig peerConfig) {
     ReplicationProtos.ReplicationPeer.Builder builder = ReplicationProtos.ReplicationPeer.newBuilder();
     if (peerConfig.getClusterKey() != null) {
       builder.setClusterkey(peerConfig.getClusterKey());
@@ -343,4 +344,26 @@ public final class ReplicationSerDeHelper {
     byte[] bytes = convert(peerConfig).toByteArray();
     return ProtobufUtil.prependPBMagic(bytes);
   }
+
+  public static ReplicationPeerDescription toReplicationPeerDescription(
+      ReplicationProtos.ReplicationPeerDescription desc) {
+    boolean enabled = ReplicationProtos.ReplicationState.State.ENABLED == desc.getState()
+        .getState();
+    ReplicationPeerConfig config = convert(desc.getConfig());
+    return new ReplicationPeerDescription(desc.getId(), enabled, config);
+  }
+
+  public static ReplicationProtos.ReplicationPeerDescription toProtoReplicationPeerDescription(
+      ReplicationPeerDescription desc) {
+    ReplicationProtos.ReplicationPeerDescription.Builder builder = ReplicationProtos.ReplicationPeerDescription
+        .newBuilder();
+    builder.setId(desc.getPeerId());
+    ReplicationProtos.ReplicationState.Builder stateBuilder = ReplicationProtos.ReplicationState
+        .newBuilder();
+    stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED
+        : ReplicationProtos.ReplicationState.State.DISABLED);
+    builder.setState(stateBuilder.build());
+    builder.setConfig(convert(desc.getPeerConfig()));
+    return builder.build();
+  }
 }
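The two helpers added above are inverses; a small round-trip sketch (the peer id and cluster key are made-up values) of what they preserve:

    ReplicationPeerConfig config = new ReplicationPeerConfig();
    config.setClusterKey("zk1,zk2,zk3:2181:/hbase");
    ReplicationPeerDescription desc =
        new ReplicationPeerDescription("peer_1", true, config);

    // POJO -> protobuf: id, ENABLED/DISABLED state and config are copied over.
    ReplicationProtos.ReplicationPeerDescription proto =
        ReplicationSerDeHelper.toProtoReplicationPeerDescription(desc);
    // protobuf -> POJO: the round trip keeps id, enabled flag and cluster key.
    ReplicationPeerDescription back =
        ReplicationSerDeHelper.toReplicationPeerDescription(proto);
    assert "peer_1".equals(back.getPeerId()) && back.isEnabled();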
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
new file mode 100644
index 00000000000..577d13a7b7a
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ReplicationPeerDescription {
+
+  private final String id;
+  private final boolean enabled;
+  private final ReplicationPeerConfig config;
+
+  public ReplicationPeerDescription(String id, boolean enabled, ReplicationPeerConfig config) {
+    this.id = id;
+    this.enabled = enabled;
+    this.config = config;
+  }
+
+  public String getPeerId() {
+    return this.id;
+  }
+
+  public boolean isEnabled() {
+    return this.enabled;
+  }
+
+  public ReplicationPeerConfig getPeerConfig() {
+    return this.config;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder("id : ").append(id);
+    builder.append(", enabled : " + enabled);
+    builder.append(", config : " + config);
+    return builder.toString();
+  }
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 285e19aab94..585a5f8f39b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.hbase.quotas.QuotaType;
 import org.apache.hadoop.hbase.quotas.ThrottleType;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.visibility.Authorizations;
 import org.apache.hadoop.hbase.security.visibility.CellVisibility;
@@ -158,6 +159,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 4231a827b18..4acb525ebd3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddRe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -1613,4 +1614,12 @@ public final class RequestConverter {
     builder.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
     return builder.build();
   }
+
+  public static ListReplicationPeersRequest buildListReplicationPeersRequest(Pattern pattern) {
+    ListReplicationPeersRequest.Builder builder = ListReplicationPeersRequest.newBuilder();
+    if (pattern != null) {
+      builder.setRegex(pattern.toString());
+    }
+    return builder.build();
+  }
 }
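buildListReplicationPeersRequest() treats a null pattern as "list everything". A sketch of both cases, assuming the generated request message exposes the usual proto2 hasRegex()/getRegex() accessors for the regex field set via setRegex() above:

    // No pattern: the regex field stays unset and the master returns all peers.
    ListReplicationPeersRequest all =
        RequestConverter.buildListReplicationPeersRequest(null);
    assert !all.hasRegex();

    // With a pattern: the compiled regex is shipped as its string form.
    ListReplicationPeersRequest some =
        RequestConverter.buildListReplicationPeersRequest(Pattern.compile("^peer_\\d+$"));
    assert "^peer_\\d+$".equals(some.getRegex());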
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index d56c534a452..8f293f3a785 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -69797,6 +69797,18 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done);
 
+      /**
+       * <pre>
+       ** Returns a list of replication peers 
+       * </pre>
+       *
+       * <code>rpc ListReplicationPeers(.hbase.pb.ListReplicationPeersRequest) returns (.hbase.pb.ListReplicationPeersResponse);</code>
+       */
+      public abstract void listReplicationPeers(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done);
+
       /**
        * <pre>
        ** Returns a list of ServerNames marked as draining. 
@@ -70350,6 +70362,14 @@ public final class MasterProtos {
           impl.updateReplicationPeerConfig(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void listReplicationPeers(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done) {
+          impl.listReplicationPeers(controller, request, done);
+        }
+
         @java.lang.Override
         public  void listDrainingRegionServers(
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
@@ -70525,10 +70545,12 @@ public final class MasterProtos {
             case 63:
               return impl.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request);
             case 64:
-              return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
+              return impl.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request);
             case 65:
-              return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
+              return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
             case 66:
+              return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
+            case 67:
               return impl.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -70673,10 +70695,12 @@ public final class MasterProtos {
             case 63:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
             case 64:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
             case 65:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
             case 66:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+            case 67:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -70821,10 +70845,12 @@ public final class MasterProtos {
             case 63:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
             case 64:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
             case 65:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
             case 66:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+            case 67:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -71631,6 +71657,18 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done);
 
+    /**
+     * <pre>
+     ** Returns a list of replication peers 
+     * </pre>
+     *
+     * <code>rpc ListReplicationPeers(.hbase.pb.ListReplicationPeersRequest) returns (.hbase.pb.ListReplicationPeersResponse);</code>
+     */
+    public abstract void listReplicationPeers(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done);
+
     /**
      * <pre>
      ** Returns a list of ServerNames marked as draining. 
@@ -72010,16 +72048,21 @@ public final class MasterProtos {
               done));
           return;
         case 64:
+          this.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse>specializeCallback(
+              done));
+          return;
+        case 65:
           this.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request,
            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse>specializeCallback(
               done));
           return;
-        case 65:
+        case 66:
           this.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request,
            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse>specializeCallback(
               done));
           return;
-        case 66:
+        case 67:
           this.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request,
            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse>specializeCallback(
               done));
@@ -72167,10 +72210,12 @@ public final class MasterProtos {
         case 63:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
         case 64:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
         case 65:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
         case 66:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+        case 67:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -72315,10 +72360,12 @@ public final class MasterProtos {
         case 63:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
         case 64:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
         case 65:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
         case 66:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+        case 67:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -73301,12 +73348,27 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance()));
       }
 
+      public  void listReplicationPeers(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(64),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance()));
+      }
+
       public  void listDrainingRegionServers(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request,
          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(64),
+          getDescriptor().getMethods().get(65),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance(),
@@ -73321,7 +73383,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request,
          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(65),
+          getDescriptor().getMethods().get(66),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance(),
@@ -73336,7 +73398,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request,
          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(66),
+          getDescriptor().getMethods().get(67),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance(),
@@ -73673,6 +73735,11 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse listReplicationPeers(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
       public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse listDrainingRegionServers(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request)
@@ -74464,12 +74531,24 @@ public final class MasterProtos {
       }
 
 
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse listReplicationPeers(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(64),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance());
+      }
+
+
       public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse listDrainingRegionServers(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(64),
+          getDescriptor().getMethods().get(65),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance());
@@ -74481,7 +74560,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(65),
+          getDescriptor().getMethods().get(66),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance());
@@ -74493,7 +74572,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(66),
+          getDescriptor().getMethods().get(67),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance());
@@ -75319,7 +75398,7 @@ public final class MasterProtos {
       "\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerN" +
       "ame\"&\n$RemoveDrainFromRegionServersRespo",
       "nse*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005ME" +
-      "RGE\020\0012\2301\n\rMasterService\022e\n\024GetSchemaAlte" +
+      "RGE\020\0012\3771\n\rMasterService\022e\n\024GetSchemaAlte" +
       "rStatus\022%.hbase.pb.GetSchemaAlterStatusR" +
       "equest\032&.hbase.pb.GetSchemaAlterStatusRe" +
       "sponse\022b\n\023GetTableDescriptors\022$.hbase.pb" +
@@ -75468,17 +75547,20 @@ public final class MasterProtos {
       "\n\033UpdateReplicationPeerConfig\022,.hbase.pb" +
       ".UpdateReplicationPeerConfigRequest\032-.hb" +
       "ase.pb.UpdateReplicationPeerConfigRespon",
-      "se\022t\n\031listDrainingRegionServers\022*.hbase." +
-      "pb.ListDrainingRegionServersRequest\032+.hb" +
-      "ase.pb.ListDrainingRegionServersResponse" +
-      "\022_\n\022drainRegionServers\022#.hbase.pb.DrainR" +
-      "egionServersRequest\032$.hbase.pb.DrainRegi" +
-      "onServersResponse\022}\n\034removeDrainFromRegi" +
-      "onServers\022-.hbase.pb.RemoveDrainFromRegi" +
-      "onServersRequest\032..hbase.pb.RemoveDrainF" +
-      "romRegionServersResponseBI\n1org.apache.h" +
-      "adoop.hbase.shaded.protobuf.generatedB\014M",
-      "asterProtosH\001\210\001\001\240\001\001"
+      "se\022e\n\024ListReplicationPeers\022%.hbase.pb.Li" +
+      "stReplicationPeersRequest\032&.hbase.pb.Lis" +
+      "tReplicationPeersResponse\022t\n\031listDrainin" +
+      "gRegionServers\022*.hbase.pb.ListDrainingRe" +
+      "gionServersRequest\032+.hbase.pb.ListDraini" +
+      "ngRegionServersResponse\022_\n\022drainRegionSe" +
+      "rvers\022#.hbase.pb.DrainRegionServersReque" +
+      "st\032$.hbase.pb.DrainRegionServersResponse" +
+      "\022}\n\034removeDrainFromRegionServers\022-.hbase" +
+      ".pb.RemoveDrainFromRegionServersRequest\032",
+      "..hbase.pb.RemoveDrainFromRegionServersR" +
+      "esponseBI\n1org.apache.hadoop.hbase.shade" +
+      "d.protobuf.generatedB\014MasterProtosH\001\210\001\001\240" +
+      "\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
index e4fdfe851f1..6a6d4bf2a9d 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
@@ -3502,6 +3502,993 @@ public final class ReplicationProtos {
 
   }
 
+  public interface ReplicationPeerDescriptionOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.ReplicationPeerDescription)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string id = 1;</code>
+     */
+    boolean hasId();
+    /**
+     * <code>required string id = 1;</code>
+     */
+    java.lang.String getId();
+    /**
+     * <code>required string id = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getIdBytes();
+
+    /**
+     * <code>required .hbase.pb.ReplicationState state = 2;</code>
+     */
+    boolean hasState();
+    /**
+     * <code>required .hbase.pb.ReplicationState state = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState getState();
+    /**
+     * <code>required .hbase.pb.ReplicationState state = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationStateOrBuilder getStateOrBuilder();
+
+    /**
+     * <code>required .hbase.pb.ReplicationPeer config = 3;</code>
+     */
+    boolean hasConfig();
+    /**
+     * <code>required .hbase.pb.ReplicationPeer config = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer getConfig();
+    /**
+     * <code>required .hbase.pb.ReplicationPeer config = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerOrBuilder getConfigOrBuilder();
+  }
+  /**
+   * 
+   **
+   * Used by replication. Description of the replication peer.
+   * 
+ * + * Protobuf type {@code hbase.pb.ReplicationPeerDescription} + */ + public static final class ReplicationPeerDescription extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ReplicationPeerDescription) + ReplicationPeerDescriptionOrBuilder { + // Use ReplicationPeerDescription.newBuilder() to construct. + private ReplicationPeerDescription(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ReplicationPeerDescription() { + id_ = ""; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReplicationPeerDescription( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + id_ = bs; + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = state_.toBuilder(); + } + state_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(state_); + state_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = config_.toBuilder(); + } + config_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(config_); + config_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ReplicationPeerDescription_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ReplicationPeerDescription_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder.class); + } + + private int bitField0_; + public static final int ID_FIELD_NUMBER = 1; + private volatile java.lang.Object id_; + /** + * required string id = 1; + */ + public boolean hasId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string id = 1; + */ + public java.lang.String getId() { + java.lang.Object ref = id_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + id_ = s; + } + return s; + } + } + /** + * required string id = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getIdBytes() { + java.lang.Object ref = id_; + if (ref instanceof java.lang.String) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + id_ = b; + return b; + } else { + return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState state_; + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState getState() { + return state_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.getDefaultInstance() : state_; + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationStateOrBuilder getStateOrBuilder() { + return state_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.getDefaultInstance() : state_; + } + + public static final int CONFIG_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer config_; + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public boolean hasConfig() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer getConfig() { + return config_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.getDefaultInstance() : config_; + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerOrBuilder getConfigOrBuilder() { + return config_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.getDefaultInstance() : config_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasConfig()) { + memoizedIsInitialized = 0; + return false; + } + if (!getState().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getConfig().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, id_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, getState()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, getConfig()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, id_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getState()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getConfig()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription) obj; + + boolean result = true; + result = result && (hasId() == other.hasId()); + if (hasId()) { + result = result && getId() + .equals(other.getId()); + } + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && getState() + .equals(other.getState()); + } + result = result && (hasConfig() == other.hasConfig()); + if (hasConfig()) { + result = result && getConfig() + .equals(other.getConfig()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasId()) { + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + } + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + getState().hashCode(); + } + if (hasConfig()) { + hash = (37 * hash) + CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getConfig().hashCode(); + } + 
hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
+     **
+     * Used by replication. Description of the replication peer.
+     * 
+ * + * Protobuf type {@code hbase.pb.ReplicationPeerDescription} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ReplicationPeerDescription) + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescriptionOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ReplicationPeerDescription_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ReplicationPeerDescription_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getStateFieldBuilder(); + getConfigFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + id_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (stateBuilder_ == null) { + state_ = null; + } else { + stateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (configBuilder_ == null) { + config_ = null; + } else { + configBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ReplicationPeerDescription_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.id_ = id_; + if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (stateBuilder_ == null) { + result.state_ = state_; + } else { + result.state_ = stateBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (configBuilder_ == null) { + result.config_ = config_; + } else { + result.config_ = configBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.getDefaultInstance()) return this; + if (other.hasId()) { + bitField0_ |= 0x00000001; + id_ = other.id_; + onChanged(); + } + if (other.hasState()) { + mergeState(other.getState()); + } + if (other.hasConfig()) { + mergeConfig(other.getConfig()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasId()) { + return false; + } + if (!hasState()) { + return false; + } + if (!hasConfig()) { + return false; + } + if (!getState().isInitialized()) { + return false; + } + if (!getConfig().isInitialized()) { + return false; + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; 
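
The mergeFrom()/isInitialized() pair above is what enforces the three required fields (id, state, config) on the builder path. A minimal usage sketch, not part of the patch; the peer id, the cluster key, and the ReplicationState.State enum value are illustrative assumptions based on HBase's Replication.proto:

import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;

public class PeerDescriptionExample {
  public static void main(String[] args) {
    // id, state and config are all required, so build() would throw an
    // unchecked UninitializedMessageException if any setter were omitted.
    ReplicationProtos.ReplicationPeerDescription desc =
        ReplicationProtos.ReplicationPeerDescription.newBuilder()
            .setId("1") // required string id = 1
            .setState(ReplicationProtos.ReplicationState.newBuilder()
                .setState(ReplicationProtos.ReplicationState.State.ENABLED))
            .setConfig(ReplicationProtos.ReplicationPeer.newBuilder()
                .setClusterkey("zk1.example.com:2181:/hbase")) // illustrative value
            .build();
    System.out.println(desc.getId());
  }
}
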
+ } + private int bitField0_; + + private java.lang.Object id_ = ""; + /** + * required string id = 1; + */ + public boolean hasId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string id = 1; + */ + public java.lang.String getId() { + java.lang.Object ref = id_; + if (!(ref instanceof java.lang.String)) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + id_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string id = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getIdBytes() { + java.lang.Object ref = id_; + if (ref instanceof String) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + id_ = b; + return b; + } else { + return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + } + } + /** + * required string id = 1; + */ + public Builder setId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + id_ = value; + onChanged(); + return this; + } + /** + * required string id = 1; + */ + public Builder clearId() { + bitField0_ = (bitField0_ & ~0x00000001); + id_ = getDefaultInstance().getId(); + onChanged(); + return this; + } + /** + * required string id = 1; + */ + public Builder setIdBytes( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + id_ = value; + onChanged(); + return this; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState state_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationStateOrBuilder> stateBuilder_; + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState getState() { + if (stateBuilder_ == null) { + return state_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.getDefaultInstance() : state_; + } else { + return stateBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public Builder setState(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState value) { + if (stateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + state_ = value; + onChanged(); + } else { + stateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public Builder setState( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.Builder builderForValue) { + if (stateBuilder_ == null) { + state_ = builderForValue.build(); + onChanged(); + } else { + stateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public Builder mergeState(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState value) { + if (stateBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + state_ != null && + state_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.getDefaultInstance()) { + state_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.newBuilder(state_).mergeFrom(value).buildPartial(); + } else { + state_ = value; + } + onChanged(); + } else { + stateBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public Builder clearState() { + if (stateBuilder_ == null) { + state_ = null; + onChanged(); + } else { + stateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.Builder getStateBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getStateFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationStateOrBuilder getStateOrBuilder() { + if (stateBuilder_ != null) { + return stateBuilder_.getMessageOrBuilder(); + } else { + return state_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.getDefaultInstance() : state_; + } + } + /** + * required .hbase.pb.ReplicationState state = 2; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationStateOrBuilder> + getStateFieldBuilder() { + if (stateBuilder_ == null) { + stateBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationStateOrBuilder>( + getState(), + getParentForChildren(), + isClean()); + state_ = null; + } + return stateBuilder_; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer config_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerOrBuilder> configBuilder_; + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public boolean hasConfig() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer getConfig() { + if (configBuilder_ == null) { + return config_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.getDefaultInstance() : config_; + } else { + return configBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public Builder setConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer value) { + if (configBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + config_ = value; + onChanged(); + } else { + configBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public Builder setConfig( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.Builder builderForValue) { + if (configBuilder_ == null) { + config_ = builderForValue.build(); + onChanged(); + } else { + configBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public Builder mergeConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer value) { + if (configBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + config_ != null && + config_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.getDefaultInstance()) { + config_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.newBuilder(config_).mergeFrom(value).buildPartial(); + } else { + config_ = value; + } + onChanged(); + } else { + configBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public Builder clearConfig() { + if (configBuilder_ == null) { + config_ = null; + onChanged(); + } else { + configBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.Builder getConfigBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getConfigFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerOrBuilder getConfigOrBuilder() { + if (configBuilder_ != null) { + return configBuilder_.getMessageOrBuilder(); + } else { + return config_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.getDefaultInstance() : config_; + } + } + /** + * required .hbase.pb.ReplicationPeer config = 3; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerOrBuilder> + getConfigFieldBuilder() { + if (configBuilder_ == null) { + configBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerOrBuilder>( + getConfig(), + getParentForChildren(), + isClean()); + config_ = null; + } + return configBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationPeerDescription) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ReplicationPeerDescription) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public ReplicationPeerDescription parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ReplicationPeerDescription(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + public interface ReplicationHLogPositionOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.ReplicationHLogPosition) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -10347,6 +11334,1304 @@ public final class ReplicationProtos { } + public interface ListReplicationPeersRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.ListReplicationPeersRequest) + 
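
Because the generated ReplicationPeerDescription carries its own PARSER, a description can be round-tripped through the wire format. A short sketch, not part of the patch, using only the standard generated toByteArray()/parseFrom() methods:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription;

public class PeerDescriptionRoundTrip {
  // parseFrom() re-validates the required id/state/config fields, so a
  // truncated or incomplete payload surfaces as InvalidProtocolBufferException.
  static ReplicationPeerDescription roundTrip(ReplicationPeerDescription desc)
      throws InvalidProtocolBufferException {
    byte[] wire = desc.toByteArray();
    return ReplicationPeerDescription.parseFrom(wire);
  }
}
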
org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * optional string regex = 1; + */ + boolean hasRegex(); + /** + * optional string regex = 1; + */ + java.lang.String getRegex(); + /** + * optional string regex = 1; + */ + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getRegexBytes(); + } + /** + * Protobuf type {@code hbase.pb.ListReplicationPeersRequest} + */ + public static final class ListReplicationPeersRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ListReplicationPeersRequest) + ListReplicationPeersRequestOrBuilder { + // Use ListReplicationPeersRequest.newBuilder() to construct. + private ListReplicationPeersRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListReplicationPeersRequest() { + regex_ = ""; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListReplicationPeersRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + regex_ = bs; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.Builder.class); + } + + private int bitField0_; + public static final int REGEX_FIELD_NUMBER = 1; + private volatile java.lang.Object regex_; + /** + * optional string regex = 1; + */ + public boolean hasRegex() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string regex = 1; + */ 
+ public java.lang.String getRegex() { + java.lang.Object ref = regex_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + regex_ = s; + } + return s; + } + } + /** + * optional string regex = 1; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getRegexBytes() { + java.lang.Object ref = regex_; + if (ref instanceof java.lang.String) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + regex_ = b; + return b; + } else { + return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, regex_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, regex_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest) obj; + + boolean result = true; + result = result && (hasRegex() == other.hasRegex()); + if (hasRegex()) { + result = result && getRegex() + .equals(other.getRegex()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegex()) { + hash = (37 * hash) + REGEX_FIELD_NUMBER; + hash = (53 * hash) + getRegex().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, 
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public 
Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListReplicationPeersRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ListReplicationPeersRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + regex_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 
0x00000001; + } + result.regex_ = regex_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance()) return this; + if (other.hasRegex()) { + bitField0_ |= 0x00000001; + regex_ = other.regex_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object regex_ = ""; + /** + * optional string regex = 1; + */ + public boolean hasRegex() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string regex = 1; + */ + public java.lang.String getRegex() { + java.lang.Object ref = regex_; + if (!(ref instanceof java.lang.String)) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + regex_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string regex = 1; + */ + public 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getRegexBytes() { + java.lang.Object ref = regex_; + if (ref instanceof String) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + regex_ = b; + return b; + } else { + return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + } + } + /** + * optional string regex = 1; + */ + public Builder setRegex( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + regex_ = value; + onChanged(); + return this; + } + /** + * optional string regex = 1; + */ + public Builder clearRegex() { + bitField0_ = (bitField0_ & ~0x00000001); + regex_ = getDefaultInstance().getRegex(); + onChanged(); + return this; + } + /** + * optional string regex = 1; + */ + public Builder setRegexBytes( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + regex_ = value; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListReplicationPeersRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListReplicationPeersRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public ListReplicationPeersRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ListReplicationPeersRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface ListReplicationPeersResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.ListReplicationPeersResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + java.util.List + getPeerDescList(); + /** + * repeated 
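
ListReplicationPeersRequest treats regex as optional: leaving it unset asks for an unfiltered listing, while setting it restricts the result to peers whose id matches. A usage sketch, not part of the patch; the peer-id pattern is illustrative:

import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;

public class ListPeersRequestExample {
  public static void main(String[] args) {
    // No regex set: request every peer.
    ListReplicationPeersRequest all = ListReplicationPeersRequest.newBuilder().build();

    // Regex set: only peers whose id matches the pattern.
    ListReplicationPeersRequest some = ListReplicationPeersRequest.newBuilder()
        .setRegex("dc2_.*") // illustrative peer-id pattern
        .build();

    System.out.println(all.hasRegex());  // false
    System.out.println(some.getRegex()); // dc2_.*
  }
}
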
.hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription getPeerDesc(int index); + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + int getPeerDescCount(); + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + java.util.List + getPeerDescOrBuilderList(); + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescriptionOrBuilder getPeerDescOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ListReplicationPeersResponse} + */ + public static final class ListReplicationPeersResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ListReplicationPeersResponse) + ListReplicationPeersResponseOrBuilder { + // Use ListReplicationPeersResponse.newBuilder() to construct. + private ListReplicationPeersResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListReplicationPeersResponse() { + peerDesc_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListReplicationPeersResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + peerDesc_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + peerDesc_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + peerDesc_ = java.util.Collections.unmodifiableList(peerDesc_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.Builder.class); + } + + public static final int PEER_DESC_FIELD_NUMBER = 1; + private java.util.List peerDesc_; + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public java.util.List getPeerDescList() { + return peerDesc_; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public java.util.List + getPeerDescOrBuilderList() { + return peerDesc_; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public int getPeerDescCount() { + return peerDesc_.size(); + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription getPeerDesc(int index) { + return peerDesc_.get(index); + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescriptionOrBuilder getPeerDescOrBuilder( + int index) { + return peerDesc_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getPeerDescCount(); i++) { + if (!getPeerDesc(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < peerDesc_.size(); i++) { + output.writeMessage(1, peerDesc_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < peerDesc_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, peerDesc_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse) obj; + + boolean result = true; + result = result && getPeerDescList() + .equals(other.getPeerDescList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getPeerDescCount() > 0) { + hash = (37 * hash) + PEER_DESC_FIELD_NUMBER; + hash = (53 * hash) + getPeerDescList().hashCode(); + } + hash = (29 * hash) + 
unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListReplicationPeersResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ListReplicationPeersResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getPeerDescFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (peerDescBuilder_ == null) { + peerDesc_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + peerDescBuilder_.clear(); + } + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_ListReplicationPeersResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance(); + } + + public 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse(this); + int from_bitField0_ = bitField0_; + if (peerDescBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + peerDesc_ = java.util.Collections.unmodifiableList(peerDesc_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.peerDesc_ = peerDesc_; + } else { + result.peerDesc_ = peerDescBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance()) return this; + if (peerDescBuilder_ == null) { + if (!other.peerDesc_.isEmpty()) { + if (peerDesc_.isEmpty()) { + peerDesc_ = other.peerDesc_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePeerDescIsMutable(); + peerDesc_.addAll(other.peerDesc_); + } + onChanged(); + } + } else { + if (!other.peerDesc_.isEmpty()) { + if (peerDescBuilder_.isEmpty()) { + peerDescBuilder_.dispose(); + peerDescBuilder_ = null; + peerDesc_ = other.peerDesc_; + bitField0_ = (bitField0_ & ~0x00000001); + peerDescBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getPeerDescFieldBuilder() : null; + } else { + peerDescBuilder_.addAllMessages(other.peerDesc_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getPeerDescCount(); i++) { + if (!getPeerDesc(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List peerDesc_ = + java.util.Collections.emptyList(); + private void ensurePeerDescIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + peerDesc_ = new java.util.ArrayList(peerDesc_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescriptionOrBuilder> peerDescBuilder_; + + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public java.util.List getPeerDescList() { + if (peerDescBuilder_ == null) { + return java.util.Collections.unmodifiableList(peerDesc_); + } else { + return peerDescBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public int getPeerDescCount() { + if (peerDescBuilder_ == null) { + return peerDesc_.size(); + } else { + return peerDescBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription getPeerDesc(int index) { + if (peerDescBuilder_ == null) { + return peerDesc_.get(index); + } else { + return peerDescBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder setPeerDesc( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription value) { + if (peerDescBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePeerDescIsMutable(); + peerDesc_.set(index, value); + onChanged(); + } else { + peerDescBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder setPeerDesc( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder builderForValue) { + if (peerDescBuilder_ == null) { + ensurePeerDescIsMutable(); + peerDesc_.set(index, builderForValue.build()); + onChanged(); + } 
else { + peerDescBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder addPeerDesc(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription value) { + if (peerDescBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePeerDescIsMutable(); + peerDesc_.add(value); + onChanged(); + } else { + peerDescBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder addPeerDesc( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription value) { + if (peerDescBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePeerDescIsMutable(); + peerDesc_.add(index, value); + onChanged(); + } else { + peerDescBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder addPeerDesc( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder builderForValue) { + if (peerDescBuilder_ == null) { + ensurePeerDescIsMutable(); + peerDesc_.add(builderForValue.build()); + onChanged(); + } else { + peerDescBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder addPeerDesc( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder builderForValue) { + if (peerDescBuilder_ == null) { + ensurePeerDescIsMutable(); + peerDesc_.add(index, builderForValue.build()); + onChanged(); + } else { + peerDescBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder addAllPeerDesc( + java.lang.Iterable values) { + if (peerDescBuilder_ == null) { + ensurePeerDescIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, peerDesc_); + onChanged(); + } else { + peerDescBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder clearPeerDesc() { + if (peerDescBuilder_ == null) { + peerDesc_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + peerDescBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public Builder removePeerDesc(int index) { + if (peerDescBuilder_ == null) { + ensurePeerDescIsMutable(); + peerDesc_.remove(index); + onChanged(); + } else { + peerDescBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder getPeerDescBuilder( + int index) { + return getPeerDescFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescriptionOrBuilder getPeerDescOrBuilder( + int index) { + if (peerDescBuilder_ == null) { + return peerDesc_.get(index); } else { + return 
peerDescBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public java.util.List + getPeerDescOrBuilderList() { + if (peerDescBuilder_ != null) { + return peerDescBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(peerDesc_); + } + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder addPeerDescBuilder() { + return getPeerDescFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder addPeerDescBuilder( + int index) { + return getPeerDescFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ReplicationPeerDescription peer_desc = 1; + */ + public java.util.List + getPeerDescBuilderList() { + return getPeerDescFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescriptionOrBuilder> + getPeerDescFieldBuilder() { + if (peerDescBuilder_ == null) { + peerDescBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeerDescriptionOrBuilder>( + peerDesc_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + peerDesc_ = null; + } + return peerDescBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListReplicationPeersResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListReplicationPeersResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public 
ListReplicationPeersResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ListReplicationPeersResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_TableCF_descriptor; private static final @@ -10362,6 +12647,11 @@ public final class ReplicationProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_ReplicationState_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ReplicationPeerDescription_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ReplicationPeerDescription_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_ReplicationHLogPosition_descriptor; private static final @@ -10427,6 +12717,16 @@ public final class ReplicationProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_UpdateReplicationPeerConfigResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListReplicationPeersRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ListReplicationPeersRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListReplicationPeersResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ListReplicationPeersResponse_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -10447,27 +12747,33 @@ public final class ReplicationProtos { "spaces\030\006 \003(\014\022\021\n\tbandwidth\030\007 \001(\003\"g\n\020Repli" + "cationState\022/\n\005state\030\001 \002(\0162 .hbase.pb.Re", "plicationState.State\"\"\n\005State\022\013\n\007ENABLED" + - "\020\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPosit" + - "ion\022\020\n\010position\030\001 \002(\003\"\\\n\031AddReplicationP" + - "eerRequest\022\017\n\007peer_id\030\001 \002(\t\022.\n\013peer_conf" + - "ig\030\002 \002(\0132\031.hbase.pb.ReplicationPeer\"\034\n\032A" + - "ddReplicationPeerResponse\"/\n\034RemoveRepli" + - "cationPeerRequest\022\017\n\007peer_id\030\001 \002(\t\"\037\n\035Re" + - "moveReplicationPeerResponse\"/\n\034EnableRep" + - 
"licationPeerRequest\022\017\n\007peer_id\030\001 \002(\t\"\037\n\035" + - "EnableReplicationPeerResponse\"0\n\035Disable", - "ReplicationPeerRequest\022\017\n\007peer_id\030\001 \002(\t\"" + - " \n\036DisableReplicationPeerResponse\"2\n\037Get" + - "ReplicationPeerConfigRequest\022\017\n\007peer_id\030" + - "\001 \002(\t\"c\n GetReplicationPeerConfigRespons" + - "e\022\017\n\007peer_id\030\001 \002(\t\022.\n\013peer_config\030\002 \002(\0132" + - "\031.hbase.pb.ReplicationPeer\"e\n\"UpdateRepl" + - "icationPeerConfigRequest\022\017\n\007peer_id\030\001 \002(" + - "\t\022.\n\013peer_config\030\002 \002(\0132\031.hbase.pb.Replic" + - "ationPeer\"%\n#UpdateReplicationPeerConfig" + - "ResponseBN\n1org.apache.hadoop.hbase.shad", - "ed.protobuf.generatedB\021ReplicationProtos" + - "H\001\210\001\001\240\001\001" + "\020\000\022\014\n\010DISABLED\020\001\"~\n\032ReplicationPeerDescr" + + "iption\022\n\n\002id\030\001 \002(\t\022)\n\005state\030\002 \002(\0132\032.hbas" + + "e.pb.ReplicationState\022)\n\006config\030\003 \002(\0132\031." + + "hbase.pb.ReplicationPeer\"+\n\027ReplicationH" + + "LogPosition\022\020\n\010position\030\001 \002(\003\"\\\n\031AddRepl" + + "icationPeerRequest\022\017\n\007peer_id\030\001 \002(\t\022.\n\013p" + + "eer_config\030\002 \002(\0132\031.hbase.pb.ReplicationP" + + "eer\"\034\n\032AddReplicationPeerResponse\"/\n\034Rem" + + "oveReplicationPeerRequest\022\017\n\007peer_id\030\001 \002", + "(\t\"\037\n\035RemoveReplicationPeerResponse\"/\n\034E" + + "nableReplicationPeerRequest\022\017\n\007peer_id\030\001" + + " \002(\t\"\037\n\035EnableReplicationPeerResponse\"0\n" + + "\035DisableReplicationPeerRequest\022\017\n\007peer_i" + + "d\030\001 \002(\t\" \n\036DisableReplicationPeerRespons" + + "e\"2\n\037GetReplicationPeerConfigRequest\022\017\n\007" + + "peer_id\030\001 \002(\t\"c\n GetReplicationPeerConfi" + + "gResponse\022\017\n\007peer_id\030\001 \002(\t\022.\n\013peer_confi" + + "g\030\002 \002(\0132\031.hbase.pb.ReplicationPeer\"e\n\"Up" + + "dateReplicationPeerConfigRequest\022\017\n\007peer", + "_id\030\001 \002(\t\022.\n\013peer_config\030\002 \002(\0132\031.hbase.p" + + "b.ReplicationPeer\"%\n#UpdateReplicationPe" + + "erConfigResponse\",\n\033ListReplicationPeers" + + "Request\022\r\n\005regex\030\001 \001(\t\"W\n\034ListReplicatio" + + "nPeersResponse\0227\n\tpeer_desc\030\001 \003(\0132$.hbas" + + "e.pb.ReplicationPeerDescriptionBN\n1org.a" + + "pache.hadoop.hbase.shaded.protobuf.gener" + + "atedB\021ReplicationProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -10500,84 +12806,102 @@ public final class ReplicationProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReplicationState_descriptor, new java.lang.String[] { "State", }); - internal_static_hbase_pb_ReplicationHLogPosition_descriptor = + internal_static_hbase_pb_ReplicationPeerDescription_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_hbase_pb_ReplicationPeerDescription_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_ReplicationPeerDescription_descriptor, + new java.lang.String[] { "Id", "State", "Config", }); + internal_static_hbase_pb_ReplicationHLogPosition_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ReplicationHLogPosition_descriptor, new java.lang.String[] { "Position", }); internal_static_hbase_pb_AddReplicationPeerRequest_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_hbase_pb_AddReplicationPeerRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AddReplicationPeerRequest_descriptor, new java.lang.String[] { "PeerId", "PeerConfig", }); internal_static_hbase_pb_AddReplicationPeerResponse_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_hbase_pb_AddReplicationPeerResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AddReplicationPeerResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(7); internal_static_hbase_pb_RemoveReplicationPeerRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor, new java.lang.String[] { "PeerId", }); internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(8); internal_static_hbase_pb_RemoveReplicationPeerResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_EnableReplicationPeerRequest_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(9); internal_static_hbase_pb_EnableReplicationPeerRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_EnableReplicationPeerRequest_descriptor, new java.lang.String[] { "PeerId", }); internal_static_hbase_pb_EnableReplicationPeerResponse_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(10); internal_static_hbase_pb_EnableReplicationPeerResponse_fieldAccessorTable = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_EnableReplicationPeerResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_DisableReplicationPeerRequest_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(11); internal_static_hbase_pb_DisableReplicationPeerRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DisableReplicationPeerRequest_descriptor, new java.lang.String[] { "PeerId", }); internal_static_hbase_pb_DisableReplicationPeerResponse_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(12); internal_static_hbase_pb_DisableReplicationPeerResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_DisableReplicationPeerResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetReplicationPeerConfigRequest_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(13); internal_static_hbase_pb_GetReplicationPeerConfigRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetReplicationPeerConfigRequest_descriptor, new java.lang.String[] { "PeerId", }); internal_static_hbase_pb_GetReplicationPeerConfigResponse_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(14); internal_static_hbase_pb_GetReplicationPeerConfigResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetReplicationPeerConfigResponse_descriptor, new java.lang.String[] { "PeerId", "PeerConfig", }); internal_static_hbase_pb_UpdateReplicationPeerConfigRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_UpdateReplicationPeerConfigRequest_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateReplicationPeerConfigRequest_descriptor, new java.lang.String[] { "PeerId", "PeerConfig", }); internal_static_hbase_pb_UpdateReplicationPeerConfigResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_hbase_pb_UpdateReplicationPeerConfigResponse_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UpdateReplicationPeerConfigResponse_descriptor, new java.lang.String[] { }); + internal_static_hbase_pb_ListReplicationPeersRequest_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_hbase_pb_ListReplicationPeersRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_ListReplicationPeersRequest_descriptor, + new java.lang.String[] { "Regex", }); + internal_static_hbase_pb_ListReplicationPeersResponse_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_hbase_pb_ListReplicationPeersResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
    internal_static_hbase_pb_ListReplicationPeersResponse_descriptor,
+        new java.lang.String[] { "PeerDesc", });
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor();
   }
 
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index f553e8f1ef6..7cd9921ce68 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -893,6 +893,10 @@ service MasterService {
   rpc UpdateReplicationPeerConfig(UpdateReplicationPeerConfigRequest)
     returns(UpdateReplicationPeerConfigResponse);
 
+  /** Returns a list of replication peers */
+  rpc ListReplicationPeers(ListReplicationPeersRequest)
+    returns(ListReplicationPeersResponse);
+
   /** Returns a list of ServerNames marked as draining. */
   rpc listDrainingRegionServers(ListDrainingRegionServersRequest)
     returns(ListDrainingRegionServersResponse);
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
index c7485144453..4cb4436bed9 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Replication.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -57,6 +57,15 @@ message ReplicationState {
   required State state = 1;
 }
 
+/**
+ * Used by replication. Description of the replication peer.
+ */
+message ReplicationPeerDescription {
+  required string id = 1;
+  required ReplicationState state = 2;
+  required ReplicationPeer config = 3;
+}
+
 /**
  * Used by replication. Holds the current position in an WAL file.
  */
@@ -109,3 +118,11 @@ message UpdateReplicationPeerConfigRequest {
 
 message UpdateReplicationPeerConfigResponse {
 }
+
+message ListReplicationPeersRequest {
+  optional string regex = 1;
+}
+
+message ListReplicationPeersResponse {
+  repeated ReplicationPeerDescription peer_desc = 1;
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index c42c7b7ff7b..b0569e8b6b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1951,4 +1951,24 @@ public interface MasterObserver extends Coprocessor {
       final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
       ReplicationPeerConfig peerConfig) throws IOException {
   }
+
+  /**
+   * Called before listing replication peers.
+   * @param ctx the environment to interact with the framework and master
+   * @param regex The regular expression to match peer id
+   * @throws IOException on failure
+   */
+  default void preListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String regex) throws IOException {
+  }
+
+  /**
+   * Called after listing replication peers.
+   * @param ctx the environment to interact with the framework and master
+   * @param regex The regular expression to match peer id
+   * @throws IOException on failure
+   */
+  default void postListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String regex) throws IOException {
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a87c38e6b50..75da73f8043 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -142,6 +142,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
 import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -3224,6 +3225,21 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
   }
 
+  @Override
+  public List<ReplicationPeerDescription> listReplicationPeers(String regex)
+      throws ReplicationException, IOException {
+    if (cpHost != null) {
+      cpHost.preListReplicationPeers(regex);
+    }
+    LOG.info(getClientIdAuditPrefix() + " list replication peers, regex=" + regex);
+    Pattern pattern = regex == null ? null : Pattern.compile(regex);
+    List<ReplicationPeerDescription> peers = this.replicationManager.listReplicationPeers(pattern);
+    if (cpHost != null) {
+      cpHost.postListReplicationPeers(regex);
+    }
+    return peers;
+  }
+
   @Override
   public void drainRegionServer(final ServerName server) {
     String parentZnode = getZooKeeper().znodePaths.drainingZNode;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 4e3987ec5cc..2b6ae39f3d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -1769,4 +1769,24 @@ public class MasterCoprocessorHost
       }
     });
   }
+
+  public void preListReplicationPeers(final String regex) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.preListReplicationPeers(ctx, regex);
+      }
+    });
+  }
+
+  public void postListReplicationPeers(final String regex) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.postListReplicationPeers(ctx, regex);
+      }
+    });
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 707cb39870e..732e678c68c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -95,6 +95,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Enabl
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
@@ -102,6 +104,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Updat
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessController;
 import org.apache.hadoop.hbase.security.visibility.VisibilityController;
@@ -1727,6 +1730,22 @@ public class MasterRpcServices extends RSRpcServices
     }
   }
 
+  @Override
+  public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
+      ListReplicationPeersRequest request) throws ServiceException {
+    ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
+    try {
+      List<ReplicationPeerDescription> peers = master
+          .listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
+      for (ReplicationPeerDescription peer : peers) {
+        response.addPeerDesc(ReplicationSerDeHelper.toProtoReplicationPeerDescription(peer));
+      }
+    } catch (ReplicationException | IOException e) {
+      throw new ServiceException(e);
+    }
+    return response.build();
+  }
+
   @Override
   public ListDrainingRegionServersResponse listDrainingRegionServers(RpcController controller,
       ListDrainingRegionServersRequest request) throws ServiceException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 7686fc8ebee..060bdd86833 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 
 import com.google.protobuf.Service;
 
@@ -461,6 +462,14 @@ public interface MasterServices extends Server {
   void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
       throws ReplicationException, IOException;
 
+  /**
+   * Return a list of replication peers.
+   * @param regex The regular expression to match peer id
+   * @return a list of replication peer descriptions
+   */
+  List<ReplicationPeerDescription> listReplicationPeers(String regex)
+      throws ReplicationException, IOException;
+
   /**
    * Mark a region server as draining to prevent additional regions from getting assigned to it.
    * @param server Region servers to drain.
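Taken end to end, the plumbing above runs Admin, then MasterRpcServices, then HMaster, then the ReplicationManager filter shown in the next hunk. A minimal client-side sketch of the new calls follows; it assumes a reachable cluster, the peer ids are made up, and the ReplicationPeerDescription accessors (getPeerId(), isEnabled(), getPeerConfig()) are inferred from its constructor arguments rather than shown in this patch.

```java
import java.util.List;
import java.util.regex.Pattern;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListReplicationPeersExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // No argument: every configured peer comes back.
      List<ReplicationPeerDescription> all = admin.listReplicationPeers();
      System.out.println("total peers: " + all.size());

      // Pattern overload: the regex travels to the master in
      // ListReplicationPeersRequest and is matched against each peer id.
      List<ReplicationPeerDescription> dcPeers =
          admin.listReplicationPeers(Pattern.compile("dc-.*"));
      for (ReplicationPeerDescription peer : dcPeers) {
        System.out.println(peer.getPeerId() + " enabled=" + peer.isEnabled()
            + " clusterKey=" + peer.getPeerConfig().getClusterKey());
      }
    }
  }
}
```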
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
index f00730dc77b..139a38015dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
@@ -18,9 +18,12 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
@@ -30,6 +33,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
@@ -100,6 +104,20 @@ public class ReplicationManager {
     this.replicationPeers.updatePeerConfig(peerId, peerConfig);
   }
 
+  public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
+      throws ReplicationException {
+    List<ReplicationPeerDescription> peers = new ArrayList<>();
+    List<String> peerIds = replicationPeers.getAllPeerIds();
+    for (String peerId : peerIds) {
+      if (pattern == null || pattern.matcher(peerId).matches()) {
+        peers.add(new ReplicationPeerDescription(peerId,
+            replicationPeers.getStatusOfPeerFromBackingStore(peerId),
+            replicationPeers.getReplicationPeerConfig(peerId)));
+      }
+    }
+    return peers;
+  }
+
   /**
    * Set a namespace in the peer config means that all tables in this namespace
    * will be replicated to the peer cluster.
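One subtlety in the filter above: Pattern.matcher(peerId).matches() anchors the expression at both ends, so the supplied regex must describe the whole peer id, not a substring of it. A plain-JDK sketch of that distinction (the peer ids are made up):

```java
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class MatchSemanticsDemo {
  public static void main(String[] args) {
    List<String> peerIds = Arrays.asList("dc1", "dc2", "dc1-backup");

    // Full match, as ReplicationManager uses: "dc" selects nothing here,
    // because no peer id is exactly the string "dc".
    printMatches(peerIds, Pattern.compile("dc"));

    // Substring-style behaviour requires widening the regex on the caller side.
    printMatches(peerIds, Pattern.compile("dc.*"));   // dc1, dc2, dc1-backup
    printMatches(peerIds, Pattern.compile("dc\\d"));  // dc1, dc2
  }

  private static void printMatches(List<String> ids, Pattern p) {
    for (String id : ids) {
      if (p.matcher(id).matches()) {
        System.out.println(p.pattern() + " -> " + id);
      }
    }
  }
}
```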
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 0564ece1293..50118195a02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2733,4 +2733,10 @@ public class AccessController extends BaseMasterAndRegionObserver
       ReplicationPeerConfig peerConfig) throws IOException {
     requirePermission(getActiveUser(ctx), "updateReplicationPeerConfig", Action.ADMIN);
   }
-}
+
+  @Override
+  public void preListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String regex) throws IOException {
+    requirePermission(getActiveUser(ctx), "listReplicationPeers", Action.ADMIN);
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 15d51507ee9..435e20b8039 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -207,7 +208,7 @@ public class TestReplicationAdmin {
     assertFalse(admin.getPeerState(ID_ONE));
     try {
       admin.getPeerState(ID_SECOND);
-    } catch (IllegalArgumentException iae) {
+    } catch (ReplicationPeerNotFoundException e) {
       // OK!
     }
     admin.removePeer(ID_ONE);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 36dc2e20567..10368b405c8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -425,4 +426,10 @@ public class MockNoopMasterServices implements MasterServices, Server {
   public void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
       throws ReplicationException, IOException {
   }
+
+  @Override
+  public List<ReplicationPeerDescription> listReplicationPeers(String regex)
+      throws ReplicationException, IOException {
+    return null;
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 71076695469..5dcea53e2a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2961,4 +2961,19 @@
     verifyAllowed(action, SUPERUSER, USER_ADMIN);
     verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
+
+  @Test
+  public void testListReplicationPeers() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preListReplicationPeers(ObserverContext.createAndPrepare(CP_ENV, null),
+          "test");
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index e9026188561..1d7c748da90 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -122,6 +122,7 @@ In case the table goes out of date, the unit tests which check for accuracy of p
 | | disableReplicationPeer | superuser\|global(A)
 | | getReplicationPeerConfig | superuser\|global(A)
 | | updateReplicationPeerConfig | superuser\|global(A)
+| | listReplicationPeers | superuser\|global(A)
 | Region | openRegion | superuser\|global(A)
 | | closeRegion | superuser\|global(A)
 | | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)
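The new pre/post hooks follow the same shape as the other replication observer methods, so any registered master coprocessor can pick them up; AccessController above is one such consumer. A minimal sketch of another is below. The class name is illustrative, and the empty start/stop overrides are included on the assumption that the Coprocessor contract on this branch still declares them as abstract.

```java
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Illustrative observer: logs every listReplicationPeers call. */
public class PeerListingAuditObserver implements MasterObserver {
  private static final Log LOG = LogFactory.getLog(PeerListingAuditObserver.class);

  @Override
  public void preListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
      String regex) throws IOException {
    // Runs before the master reads peers from the backing store; throwing
    // here vetoes the call, which is how AccessController enforces ADMIN.
    LOG.info("listReplicationPeers requested, regex=" + regex);
  }

  @Override
  public void postListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
      String regex) throws IOException {
    LOG.info("listReplicationPeers served, regex=" + regex);
  }

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    // No state to set up; present only to satisfy the Coprocessor contract.
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
  }
}
```

Such an observer would be registered the same way AccessController is, through the hbase.coprocessor.master.classes configuration property.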