HBASE-17337 list replication peers request should be routed through master

Guanghao Zhang 2017-01-10 08:57:26 +08:00
parent bd157ffe9a
commit ac3b1c9aa9
23 changed files with 2809 additions and 74 deletions

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -1879,6 +1881,35 @@ public interface Admin extends Abortable, Closeable {
final ReplicationPeerConfig peerConfig) throws IOException {
}
/**
* Return a list of replication peers.
* @return a list of replication peer descriptions
* @throws IOException if a remote or network exception occurs
*/
default List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
return new ArrayList<>();
}
/**
* Return a list of replication peers.
* @param regex The regular expression to match peer id
* @return a list of replication peer descriptions
* @throws IOException if a remote or network exception occurs
*/
default List<ReplicationPeerDescription> listReplicationPeers(String regex) throws IOException {
return new ArrayList<>();
}
/**
* Return a list of replication peers.
* @param pattern The compiled regular expression to match peer id
* @return a list of replication peer descriptions
* @throws IOException if a remote or network exception occurs
*/
default List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException {
return new ArrayList<>();
}
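
A minimal client-side usage sketch of the new API (the configuration and the peer-id pattern below are hypothetical):

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListPeersExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      // Served by the master RPC endpoint rather than read from ZooKeeper by the client.
      List<ReplicationPeerDescription> all = admin.listReplicationPeers();
      // Restrict to peer ids matching a pattern (hypothetical naming scheme).
      List<ReplicationPeerDescription> dc2 = admin.listReplicationPeers(Pattern.compile("dc2_.*"));
      for (ReplicationPeerDescription peer : all) {
        System.out.println(peer.getPeerId() + " enabled=" + peer.isEnabled()
            + " clusterKey=" + peer.getPeerConfig().getClusterKey());
      }
    }
  }
}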
/**
* Mark a region server as draining to prevent additional regions from getting assigned to it.
* @param servers List of region servers to drain.

View File

@@ -102,6 +102,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Enabl
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
@@ -1711,6 +1713,12 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
throws ServiceException {
return stub.updateReplicationPeerConfig(controller, request);
}
@Override
public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
ListReplicationPeersRequest request) throws ServiceException {
return stub.listReplicationPeers(controller, request);
}
};
}

View File

@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -172,6 +173,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
@@ -3832,6 +3834,35 @@ public class HBaseAdmin implements Admin {
});
}
@Override
public List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
return listReplicationPeers((Pattern)null);
}
@Override
public List<ReplicationPeerDescription> listReplicationPeers(String regex) throws IOException {
return listReplicationPeers(Pattern.compile(regex));
}
@Override
public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
throws IOException {
return executeCallable(new MasterCallable<List<ReplicationPeerDescription>>(getConnection(),
getRpcControllerFactory()) {
@Override
protected List<ReplicationPeerDescription> rpcCall() throws Exception {
List<ReplicationProtos.ReplicationPeerDescription> peersList = master.listReplicationPeers(
getRpcController(), RequestConverter.buildListReplicationPeersRequest(pattern))
.getPeerDescList();
List<ReplicationPeerDescription> result = new ArrayList<>(peersList.size());
for (ReplicationProtos.ReplicationPeerDescription peer : peersList) {
result.add(ReplicationSerDeHelper.toReplicationPeerDescription(peer));
}
return result;
}
});
}
@Override
public void drainRegionServers(List<ServerName> servers) throws IOException {
final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();

View File

@@ -29,6 +29,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.Map.Entry;
import java.util.Set;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -52,6 +54,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeer;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
@@ -238,13 +241,19 @@ public class ReplicationAdmin implements Closeable {
/**
* Get the number of slave clusters the local cluster has.
* @return number of slave clusters
* @throws IOException if a remote or network exception occurs
*/
public int getPeersCount() {
return this.replicationPeers.getAllPeerIds().size();
public int getPeersCount() throws IOException {
return this.admin.listReplicationPeers().size();
}
public Map<String, ReplicationPeerConfig> listPeerConfigs() {
return this.replicationPeers.getAllPeerConfigs();
public Map<String, ReplicationPeerConfig> listPeerConfigs() throws IOException {
List<ReplicationPeerDescription> peers = this.admin.listReplicationPeers();
Map<String, ReplicationPeerConfig> result = new TreeMap<String, ReplicationPeerConfig>();
for (ReplicationPeerDescription peer : peers) {
result.put(peer.getPeerId(), peer.getPeerConfig());
}
return result;
}
public ReplicationPeerConfig getPeerConfig(String id) throws IOException {
@@ -402,8 +411,12 @@
* a ReplicationPeerNotFoundException is thrown if it doesn't exist
* @return true if replication is enabled to that peer, false if it isn't
*/
public boolean getPeerState(String id) throws ReplicationException {
return this.replicationPeers.getStatusOfPeerFromBackingStore(id);
public boolean getPeerState(String id) throws ReplicationException, IOException {
List<ReplicationPeerDescription> peers = admin.listReplicationPeers(id);
if (peers.isEmpty() || !id.equals(peers.get(0).getPeerId())) {
throw new ReplicationPeerNotFoundException(id);
}
return peers.get(0).isEnabled();
}
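
With this change an unknown peer id now surfaces as a ReplicationPeerNotFoundException rather than an IllegalArgumentException; a hedged handling sketch (the peer id is invented):

import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class PeerStateCheck {
  // replicationAdmin is assumed to be created elsewhere; "peer1" is a made-up id.
  static boolean isPeerEnabled(ReplicationAdmin replicationAdmin) throws Exception {
    try {
      return replicationAdmin.getPeerState("peer1");
    } catch (ReplicationPeerNotFoundException e) {
      return false; // the peer is not registered with the master
    }
  }
}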
@Override
@@ -577,7 +590,7 @@
}
@VisibleForTesting
List<ReplicationPeer> listReplicationPeers() {
List<ReplicationPeer> listReplicationPeers() throws IOException {
Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
if (peers == null || peers.size() <= 0) {
return null;

View File

@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
@@ -293,7 +294,7 @@ public final class ReplicationSerDeHelper {
return peerConfig;
}
public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig peerConfig) {
ReplicationProtos.ReplicationPeer.Builder builder = ReplicationProtos.ReplicationPeer.newBuilder();
if (peerConfig.getClusterKey() != null) {
builder.setClusterkey(peerConfig.getClusterKey());
@@ -343,4 +344,26 @@
byte[] bytes = convert(peerConfig).toByteArray();
return ProtobufUtil.prependPBMagic(bytes);
}
public static ReplicationPeerDescription toReplicationPeerDescription(
ReplicationProtos.ReplicationPeerDescription desc) {
boolean enabled = ReplicationProtos.ReplicationState.State.ENABLED == desc.getState()
.getState();
ReplicationPeerConfig config = convert(desc.getConfig());
return new ReplicationPeerDescription(desc.getId(), enabled, config);
}
public static ReplicationProtos.ReplicationPeerDescription toProtoReplicationPeerDescription(
ReplicationPeerDescription desc) {
ReplicationProtos.ReplicationPeerDescription.Builder builder = ReplicationProtos.ReplicationPeerDescription
.newBuilder();
builder.setId(desc.getPeerId());
ReplicationProtos.ReplicationState.Builder stateBuilder = ReplicationProtos.ReplicationState
.newBuilder();
stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED
: ReplicationProtos.ReplicationState.State.DISABLED);
builder.setState(stateBuilder.build());
builder.setConfig(convert(desc.getPeerConfig()));
return builder.build();
}
}
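
The two new helpers are intended to be inverses; a quick round-trip sketch (the peer id and cluster key are made up):

import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;

public class SerDeRoundTrip {
  public static void main(String[] args) {
    ReplicationPeerConfig config = new ReplicationPeerConfig();
    config.setClusterKey("zk1:2181:/hbase"); // hypothetical cluster key
    ReplicationPeerDescription desc = new ReplicationPeerDescription("peer1", true, config);
    // POJO -> proto -> POJO should preserve id, enabled state and cluster key.
    ReplicationProtos.ReplicationPeerDescription proto =
        ReplicationSerDeHelper.toProtoReplicationPeerDescription(desc);
    ReplicationPeerDescription back = ReplicationSerDeHelper.toReplicationPeerDescription(proto);
    assert back.getPeerId().equals(desc.getPeerId());
    assert back.isEnabled() == desc.isEnabled();
    assert back.getPeerConfig().getClusterKey().equals(config.getClusterKey());
  }
}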

View File

@@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ReplicationPeerDescription {
private final String id;
private final boolean enabled;
private final ReplicationPeerConfig config;
public ReplicationPeerDescription(String id, boolean enabled, ReplicationPeerConfig config) {
this.id = id;
this.enabled = enabled;
this.config = config;
}
public String getPeerId() {
return this.id;
}
public boolean isEnabled() {
return this.enabled;
}
public ReplicationPeerConfig getPeerConfig() {
return this.config;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder("id : ").append(id);
builder.append(", enabled : ").append(enabled);
builder.append(", config : ").append(config);
return builder.toString();
}
}

View File

@@ -91,6 +91,7 @@ import org.apache.hadoop.hbase.quotas.QuotaType;
import org.apache.hadoop.hbase.quotas.ThrottleType;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
@@ -158,6 +159,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDe
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;

View File

@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddRe
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.util.Bytes;
@@ -1613,4 +1614,12 @@ public final class RequestConverter {
builder.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
return builder.build();
}
public static ListReplicationPeersRequest buildListReplicationPeersRequest(Pattern pattern) {
ListReplicationPeersRequest.Builder builder = ListReplicationPeersRequest.newBuilder();
if (pattern != null) {
builder.setRegex(pattern.toString());
}
return builder.build();
}
}

View File

@@ -69797,6 +69797,18 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done);
/**
* <pre>
** Returns a list of replication peers
* </pre>
*
* <code>rpc ListReplicationPeers(.hbase.pb.ListReplicationPeersRequest) returns (.hbase.pb.ListReplicationPeersResponse);</code>
*/
public abstract void listReplicationPeers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done);
/**
* <pre>
** Returns a list of ServerNames marked as draining.
@@ -70350,6 +70362,14 @@
impl.updateReplicationPeerConfig(controller, request, done);
}
@java.lang.Override
public void listReplicationPeers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done) {
impl.listReplicationPeers(controller, request, done);
}
@java.lang.Override
public void listDrainingRegionServers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
@@ -70525,10 +70545,12 @@
case 63:
return impl.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request);
case 64:
return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
return impl.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request);
case 65:
return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
case 66:
return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
case 67:
return impl.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -70673,10 +70695,12 @@
case 63:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
case 64:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
case 65:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
case 66:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
case 67:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -70821,10 +70845,12 @@
case 63:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
case 64:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
case 65:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
case 66:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
case 67:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -71631,6 +71657,18 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done);
/**
* <pre>
** Returns a list of replication peers
* </pre>
*
* <code>rpc ListReplicationPeers(.hbase.pb.ListReplicationPeersRequest) returns (.hbase.pb.ListReplicationPeersResponse);</code>
*/
public abstract void listReplicationPeers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done);
/**
* <pre>
** Returns a list of ServerNames marked as draining.
@@ -72010,16 +72048,21 @@
done));
return;
case 64:
this.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse>specializeCallback(
done));
return;
case 65:
this.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse>specializeCallback(
done));
return;
case 65:
case 66:
this.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse>specializeCallback(
done));
return;
case 66:
case 67:
this.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse>specializeCallback(
done));
@@ -72167,10 +72210,12 @@
case 63:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
case 64:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
case 65:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
case 66:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
case 67:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -72315,10 +72360,12 @@
case 63:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
case 64:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
case 65:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
case 66:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
case 67:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -73301,12 +73348,27 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance()));
}
public void listReplicationPeers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done) {
channel.callMethod(
getDescriptor().getMethods().get(64),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance(),
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.class,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance()));
}
public void listDrainingRegionServers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse> done) {
channel.callMethod(
getDescriptor().getMethods().get(64),
getDescriptor().getMethods().get(65),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance(),
@@ -73321,7 +73383,7 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse> done) {
channel.callMethod(
getDescriptor().getMethods().get(65),
getDescriptor().getMethods().get(66),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance(),
@@ -73336,7 +73398,7 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse> done) {
channel.callMethod(
getDescriptor().getMethods().get(66),
getDescriptor().getMethods().get(67),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance(),
@@ -73673,6 +73735,11 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse listReplicationPeers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse listDrainingRegionServers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request)
@@ -74464,12 +74531,24 @@
}
public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse listReplicationPeers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse) channel.callBlockingMethod(
getDescriptor().getMethods().get(64),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance());
}
public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse listDrainingRegionServers(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse) channel.callBlockingMethod(
getDescriptor().getMethods().get(64),
getDescriptor().getMethods().get(65),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance());
@@ -74481,7 +74560,7 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse) channel.callBlockingMethod(
getDescriptor().getMethods().get(65),
getDescriptor().getMethods().get(66),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance());
@@ -74493,7 +74572,7 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse) channel.callBlockingMethod(
getDescriptor().getMethods().get(66),
getDescriptor().getMethods().get(67),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance());
@@ -75319,7 +75398,7 @@
"\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerN" +
"ame\"&\n$RemoveDrainFromRegionServersRespo",
"nse*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005ME" +
"RGE\020\0012\2301\n\rMasterService\022e\n\024GetSchemaAlte" +
"RGE\020\0012\3771\n\rMasterService\022e\n\024GetSchemaAlte" +
"rStatus\022%.hbase.pb.GetSchemaAlterStatusR" +
"equest\032&.hbase.pb.GetSchemaAlterStatusRe" +
"sponse\022b\n\023GetTableDescriptors\022$.hbase.pb" +
@@ -75468,17 +75547,20 @@
"\n\033UpdateReplicationPeerConfig\022,.hbase.pb" +
".UpdateReplicationPeerConfigRequest\032-.hb" +
"ase.pb.UpdateReplicationPeerConfigRespon",
"se\022t\n\031listDrainingRegionServers\022*.hbase." +
"pb.ListDrainingRegionServersRequest\032+.hb" +
"ase.pb.ListDrainingRegionServersResponse" +
"\022_\n\022drainRegionServers\022#.hbase.pb.DrainR" +
"egionServersRequest\032$.hbase.pb.DrainRegi" +
"onServersResponse\022}\n\034removeDrainFromRegi" +
"onServers\022-.hbase.pb.RemoveDrainFromRegi" +
"onServersRequest\032..hbase.pb.RemoveDrainF" +
"romRegionServersResponseBI\n1org.apache.h" +
"adoop.hbase.shaded.protobuf.generatedB\014M",
"asterProtosH\001\210\001\001\240\001\001"
"se\022e\n\024ListReplicationPeers\022%.hbase.pb.Li" +
"stReplicationPeersRequest\032&.hbase.pb.Lis" +
"tReplicationPeersResponse\022t\n\031listDrainin" +
"gRegionServers\022*.hbase.pb.ListDrainingRe" +
"gionServersRequest\032+.hbase.pb.ListDraini" +
"ngRegionServersResponse\022_\n\022drainRegionSe" +
"rvers\022#.hbase.pb.DrainRegionServersReque" +
"st\032$.hbase.pb.DrainRegionServersResponse" +
"\022}\n\034removeDrainFromRegionServers\022-.hbase" +
".pb.RemoveDrainFromRegionServersRequest\032",
"..hbase.pb.RemoveDrainFromRegionServersR" +
"esponseBI\n1org.apache.hadoop.hbase.shade" +
"d.protobuf.generatedB\014MasterProtosH\001\210\001\001\240" +
"\001\001"
};
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {

View File

@@ -893,6 +893,10 @@ service MasterService {
rpc UpdateReplicationPeerConfig(UpdateReplicationPeerConfigRequest)
returns(UpdateReplicationPeerConfigResponse);
/** Returns a list of replication peers */
rpc ListReplicationPeers(ListReplicationPeersRequest)
returns(ListReplicationPeersResponse);
/** Returns a list of ServerNames marked as draining. */
rpc listDrainingRegionServers(ListDrainingRegionServersRequest)
returns(ListDrainingRegionServersResponse);

View File

@@ -57,6 +57,15 @@ message ReplicationState {
required State state = 1;
}
/**
* Used by replication. Description of the replication peer.
*/
message ReplicationPeerDescription {
required string id = 1;
required ReplicationState state = 2;
required ReplicationPeer config = 3;
}
/**
* Used by replication. Holds the current position in a WAL file.
*/
@@ -109,3 +118,11 @@ message UpdateReplicationPeerConfigRequest {
message UpdateReplicationPeerConfigResponse {
}
message ListReplicationPeersRequest {
optional string regex = 1;
}
message ListReplicationPeersResponse {
repeated ReplicationPeerDescription peer_desc = 1;
}
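
A small sketch of the optional regex field's semantics using the generated protobuf builders (the pattern is made up):

import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;

public class RegexFieldDemo {
  public static void main(String[] args) {
    // No filter: regex is left unset, hasRegex() is false, and the master returns every peer.
    ListReplicationPeersRequest all = ListReplicationPeersRequest.newBuilder().build();
    System.out.println(all.hasRegex()); // false
    // Filtered: the master compiles the regex and keeps only matching peer ids.
    ListReplicationPeersRequest filtered =
        ListReplicationPeersRequest.newBuilder().setRegex("dc2_.*").build();
    System.out.println(filtered.getRegex()); // dc2_.*
  }
}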

View File

@@ -1951,4 +1951,24 @@ public interface MasterObserver extends Coprocessor {
final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
ReplicationPeerConfig peerConfig) throws IOException {
}
/**
* Called before list replication peers.
* @param ctx the environment to interact with the framework and master
* @param regex The regular expression to match peer id
* @throws IOException on failure
*/
default void preListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String regex) throws IOException {
}
/**
* Called after list replication peers.
* @param ctx the environment to interact with the framework and master
* @param regex The regular expression to match peer id
* @throws IOException on failure
*/
default void postListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String regex) throws IOException {
}
}
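
A minimal observer sketch built on the new hooks (the class name and the rejected pattern are invented; empty start/stop overrides are included in case the Coprocessor lifecycle methods are still abstract on this branch):

import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical observer: rejects wildcard peer listings and could audit the rest.
// It would be loaded via hbase.coprocessor.master.classes.
public class PeerListingObserver implements MasterObserver {
  @Override
  public void start(CoprocessorEnvironment env) throws IOException {}

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {}

  @Override
  public void preListReplicationPeers(ObserverContext<MasterCoprocessorEnvironment> ctx,
      String regex) throws IOException {
    if (".*".equals(regex)) {
      throw new IOException("wildcard peer listing is not allowed here");
    }
  }

  @Override
  public void postListReplicationPeers(ObserverContext<MasterCoprocessorEnvironment> ctx,
      String regex) throws IOException {
    // e.g. emit an audit log entry here
  }
}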

View File

@@ -142,6 +142,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -3224,6 +3225,21 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
@Override
public List<ReplicationPeerDescription> listReplicationPeers(String regex)
throws ReplicationException, IOException {
if (cpHost != null) {
cpHost.preListReplicationPeers(regex);
}
LOG.info(getClientIdAuditPrefix() + " list replication peers, regex=" + regex);
Pattern pattern = regex == null ? null : Pattern.compile(regex);
List<ReplicationPeerDescription> peers = this.replicationManager.listReplicationPeers(pattern);
if (cpHost != null) {
cpHost.postListReplicationPeers(regex);
}
return peers;
}
@Override
public void drainRegionServer(final ServerName server) {
String parentZnode = getZooKeeper().znodePaths.drainingZNode;

View File

@@ -1769,4 +1769,24 @@ public class MasterCoprocessorHost
}
});
}
public void preListReplicationPeers(final String regex) throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
observer.preListReplicationPeers(ctx, regex);
}
});
}
public void postListReplicationPeers(final String regex) throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
observer.postListReplicationPeers(ctx, regex);
}
});
}
}

View File

@@ -95,6 +95,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Enabl
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
@@ -102,6 +104,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Updat
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.visibility.VisibilityController;
@@ -1727,6 +1730,22 @@ public class MasterRpcServices extends RSRpcServices
}
}
@Override
public ListReplicationPeersResponse listReplicationPeers(RpcController controller,
ListReplicationPeersRequest request) throws ServiceException {
ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder();
try {
List<ReplicationPeerDescription> peers = master
.listReplicationPeers(request.hasRegex() ? request.getRegex() : null);
for (ReplicationPeerDescription peer : peers) {
response.addPeerDesc(ReplicationSerDeHelper.toProtoReplicationPeerDescription(peer));
}
} catch (ReplicationException | IOException e) {
throw new ServiceException(e);
}
return response.build();
}
@Override
public ListDrainingRegionServersResponse listDrainingRegionServers(RpcController controller,
ListDrainingRegionServersRequest request) throws ServiceException {

View File

@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import com.google.protobuf.Service;
@@ -461,6 +462,14 @@ public interface MasterServices extends Server {
void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
throws ReplicationException, IOException;
/**
* Return a list of replication peers.
* @param regex The regular expression to match peer id
* @return a list of replication peer descriptions
*/
List<ReplicationPeerDescription> listReplicationPeers(String regex) throws ReplicationException,
IOException;
/**
* Mark a region server as draining to prevent additional regions from getting assigned to it.
* @param server Region servers to drain.

View File

@@ -18,9 +18,12 @@
package org.apache.hadoop.hbase.master.replication;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
@@ -30,6 +33,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
@@ -100,6 +104,20 @@ public class ReplicationManager {
this.replicationPeers.updatePeerConfig(peerId, peerConfig);
}
public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
throws ReplicationException {
List<ReplicationPeerDescription> peers = new ArrayList<>();
List<String> peerIds = replicationPeers.getAllPeerIds();
for (String peerId : peerIds) {
if (pattern == null || pattern.matcher(peerId).matches()) {
peers.add(new ReplicationPeerDescription(peerId, replicationPeers
.getStatusOfPeerFromBackingStore(peerId), replicationPeers
.getReplicationPeerConfig(peerId)));
}
}
return peers;
}
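
Note that matcher(peerId).matches() requires the whole peer id to match, unlike find(); callers filtering by prefix must pass an explicit wildcard. A tiny sketch (peer ids invented):

import java.util.regex.Pattern;

public class PeerIdMatching {
  public static void main(String[] args) {
    Pattern p = Pattern.compile("dc2");
    System.out.println(p.matcher("dc2").matches());        // true: full match
    System.out.println(p.matcher("dc2_backup").matches()); // false: matches() is anchored
    System.out.println(Pattern.compile("dc2.*").matcher("dc2_backup").matches()); // true
  }
}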
/**
* Set a namespace in the peer config means that all tables in this namespace
* will be replicated to the peer cluster.

View File

@@ -2733,4 +2733,10 @@ public class AccessController extends BaseMasterAndRegionObserver
ReplicationPeerConfig peerConfig) throws IOException {
requirePermission(getActiveUser(ctx), "updateReplicationPeerConfig", Action.ADMIN);
}
}
@Override
public void preListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String regex) throws IOException {
requirePermission(getActiveUser(ctx), "listReplicationPeers", Action.ADMIN);
}
}

View File

@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -207,7 +208,7 @@ public class TestReplicationAdmin {
assertFalse(admin.getPeerState(ID_ONE));
try {
admin.getPeerState(ID_SECOND);
} catch (IllegalArgumentException iae) {
} catch (ReplicationPeerNotFoundException e) {
// OK!
}
admin.removePeer(ID_ONE);

View File

@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -425,4 +426,10 @@ public class MockNoopMasterServices implements MasterServices, Server {
public void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
throws ReplicationException, IOException {
}
@Override
public List<ReplicationPeerDescription> listReplicationPeers(String regex)
throws ReplicationException, IOException {
return null;
}
}

View File

@@ -2961,4 +2961,19 @@ public class TestAccessController extends SecureTestUtil {
verifyAllowed(action, SUPERUSER, USER_ADMIN);
verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
@Test
public void testListReplicationPeers() throws Exception {
AccessTestAction action = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preListReplicationPeers(ObserverContext.createAndPrepare(CP_ENV, null),
"test");
return null;
}
};
verifyAllowed(action, SUPERUSER, USER_ADMIN);
verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
}

View File

@@ -122,6 +122,7 @@ In case the table goes out of date, the unit tests which check for accuracy of p
| | disableReplicationPeer | superuser\|global(A)
| | getReplicationPeerConfig | superuser\|global(A)
| | updateReplicationPeerConfig | superuser\|global(A)
| | listReplicationPeers | superuser\|global(A)
| Region | openRegion | superuser\|global(A)
| | closeRegion | superuser\|global(A)
| | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)