HBASE-11392 add/remove peer requests should be routed through master

This commit is contained in:
Guanghao Zhang 2016-12-20 21:20:58 +08:00 committed by Guanghao Zhang
parent 3826e63967
commit e1f4aaeacd
27 changed files with 3173 additions and 375 deletions

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever; import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@ -1823,4 +1824,22 @@ public interface Admin extends Abortable, Closeable {
* @return true if the switch is enabled, false otherwise. * @return true if the switch is enabled, false otherwise.
*/ */
boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException; boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException;
/**
 * Add a new replication peer for replicating data to slave cluster.
 * @param peerId a short name that identifies the peer
 * @param peerConfig configuration for the replication slave cluster
 * @throws IOException if a remote or network exception occurs
 * @throws UnsupportedOperationException if this Admin implementation does not support
 *   replication peer management
 */
default void addReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
    throws IOException {
  // Fail fast rather than silently ignoring the request: an empty default body would let
  // callers believe the peer was registered. Concrete Admins (e.g. HBaseAdmin) override this.
  throw new UnsupportedOperationException(
      "addReplicationPeer is not supported by this Admin implementation");
}
/**
 * Remove a peer and stop the replication.
 * @param peerId a short name that identifies the peer
 * @throws IOException if a remote or network exception occurs
 * @throws UnsupportedOperationException if this Admin implementation does not support
 *   replication peer management
 */
default void removeReplicationPeer(final String peerId) throws IOException {
  // Fail fast rather than silently ignoring the request: an empty default body would let
  // callers believe the peer was removed. Concrete Admins (e.g. HBaseAdmin) override this.
  throw new UnsupportedOperationException(
      "removeReplicationPeer is not supported by this Admin implementation");
}
} }

View File

@ -88,6 +88,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCa
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.ExceptionUtil;
@ -1637,6 +1641,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
SecurityCapabilitiesRequest request) throws ServiceException { SecurityCapabilitiesRequest request) throws ServiceException {
return stub.getSecurityCapabilities(controller, request); return stub.getSecurityCapabilities(controller, request);
} }
@Override
public AddReplicationPeerResponse addReplicationPeer(RpcController rpcController,
    AddReplicationPeerRequest req) throws ServiceException {
  // Pure pass-through: the keep-alive wrapper adds no logic around this master RPC.
  final AddReplicationPeerResponse response = stub.addReplicationPeer(rpcController, req);
  return response;
}
@Override
public RemoveReplicationPeerResponse removeReplicationPeer(RpcController rpcController,
    RemoveReplicationPeerRequest req) throws ServiceException {
  // Pure pass-through: the keep-alive wrapper adds no logic around this master RPC.
  final RemoveReplicationPeerResponse response = stub.removeReplicationPeer(rpcController, req);
  return response;
}
}; };
} }

View File

@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever; import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings; import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@ -3744,4 +3745,29 @@ public class HBaseAdmin implements Admin {
private RpcControllerFactory getRpcControllerFactory() { private RpcControllerFactory getRpcControllerFactory() {
return this.rpcControllerFactory; return this.rpcControllerFactory;
} }
@Override
public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
    throws IOException {
  // Route the add-peer request through the master so peer changes are centrally coordinated.
  MasterCallable<Void> callable =
      new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected Void rpcCall() throws Exception {
          master.addReplicationPeer(getRpcController(),
              RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig));
          return null;
        }
      };
  executeCallable(callable);
}
@Override
public void removeReplicationPeer(String peerId) throws IOException {
  // Route the remove-peer request through the master so peer changes are centrally coordinated.
  MasterCallable<Void> callable =
      new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected Void rpcCall() throws Exception {
          master.removeReplicationPeer(getRpcController(),
              RequestConverter.buildRemoveReplicationPeerRequest(peerId));
          return null;
        }
      };
  executeCallable(callable);
}
} }

View File

@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationFactory;
@ -80,9 +81,12 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
* To see which commands are available in the shell, type * To see which commands are available in the shell, type
* <code>replication</code>. * <code>replication</code>.
* </p> * </p>
*
* @deprecated use {@link org.apache.hadoop.hbase.client.Admin} instead.
*/ */
@InterfaceAudience.Public @InterfaceAudience.Public
@InterfaceStability.Evolving @InterfaceStability.Evolving
@Deprecated
public class ReplicationAdmin implements Closeable { public class ReplicationAdmin implements Closeable {
private static final Log LOG = LogFactory.getLog(ReplicationAdmin.class); private static final Log LOG = LogFactory.getLog(ReplicationAdmin.class);
@ -108,6 +112,8 @@ public class ReplicationAdmin implements Closeable {
*/ */
private final ZooKeeperWatcher zkw; private final ZooKeeperWatcher zkw;
private Admin admin;
/** /**
* Constructor that creates a connection to the local ZooKeeper ensemble. * Constructor that creates a connection to the local ZooKeeper ensemble.
* @param conf Configuration to use * @param conf Configuration to use
@ -116,6 +122,7 @@ public class ReplicationAdmin implements Closeable {
*/ */
public ReplicationAdmin(Configuration conf) throws IOException { public ReplicationAdmin(Configuration conf) throws IOException {
this.connection = ConnectionFactory.createConnection(conf); this.connection = ConnectionFactory.createConnection(conf);
admin = connection.getAdmin();
try { try {
zkw = createZooKeeperWatcher(); zkw = createZooKeeperWatcher();
try { try {
@ -133,9 +140,7 @@ public class ReplicationAdmin implements Closeable {
throw exception; throw exception;
} }
} catch (Exception exception) { } catch (Exception exception) {
if (connection != null) { connection.close();
connection.close();
}
if (exception instanceof IOException) { if (exception instanceof IOException) {
throw (IOException) exception; throw (IOException) exception;
} else if (exception instanceof RuntimeException) { } else if (exception instanceof RuntimeException) {
@ -176,11 +181,12 @@ public class ReplicationAdmin implements Closeable {
*/ */
@Deprecated @Deprecated
public void addPeer(String id, ReplicationPeerConfig peerConfig, public void addPeer(String id, ReplicationPeerConfig peerConfig,
Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException { Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException,
IOException {
if (tableCfs != null) { if (tableCfs != null) {
peerConfig.setTableCFsMap(tableCfs); peerConfig.setTableCFsMap(tableCfs);
} }
this.replicationPeers.registerPeer(id, peerConfig); this.admin.addReplicationPeer(id, peerConfig);
} }
/** /**
@ -188,10 +194,11 @@ public class ReplicationAdmin implements Closeable {
* @param id a short name that identifies the cluster * @param id a short name that identifies the cluster
* @param peerConfig configuration for the replication slave cluster * @param peerConfig configuration for the replication slave cluster
*/ */
public void addPeer(String id, ReplicationPeerConfig peerConfig) throws ReplicationException { public void addPeer(String id, ReplicationPeerConfig peerConfig) throws ReplicationException,
IOException {
checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(), checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
peerConfig.getTableCFsMap()); peerConfig.getTableCFsMap());
this.replicationPeers.registerPeer(id, peerConfig); this.admin.addReplicationPeer(id, peerConfig);
} }
/** /**
@ -213,8 +220,8 @@ public class ReplicationAdmin implements Closeable {
* Removes a peer cluster and stops the replication to it. * Removes a peer cluster and stops the replication to it.
* @param id a short name that identifies the cluster * @param id a short name that identifies the cluster
*/ */
public void removePeer(String id) throws ReplicationException { public void removePeer(String id) throws IOException {
this.replicationPeers.unregisterPeer(id); this.admin.removeReplicationPeer(id);
} }
/** /**
@ -403,6 +410,7 @@ public class ReplicationAdmin implements Closeable {
if (this.connection != null) { if (this.connection != null) {
this.connection.close(); this.connection.close();
} }
admin.close();
} }

View File

@ -41,10 +41,10 @@ public class ReplicationFactory {
} }
public static ReplicationQueuesClient getReplicationQueuesClient( public static ReplicationQueuesClient getReplicationQueuesClient(
ReplicationQueuesClientArguments args) ReplicationQueuesClientArguments args) throws Exception {
throws Exception { Class<?> classToBuild = args.getConf().getClass(
Class<?> classToBuild = args.getConf().getClass("hbase.region.replica." + "hbase.region.replica.replication.replicationQueuesClient.class",
"replication.replicationQueuesClient.class", ReplicationQueuesClientZKImpl.class); ReplicationQueuesClientZKImpl.class);
return (ReplicationQueuesClient) ConstructorUtils.invokeConstructor(classToBuild, args); return (ReplicationQueuesClient) ConstructorUtils.invokeConstructor(classToBuild, args);
} }

View File

@ -44,8 +44,10 @@ import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
@ -110,6 +112,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Pair;
@ -1560,4 +1565,19 @@ public final class RequestConverter {
} }
throw new UnsupportedOperationException("Unsupport switch type:" + switchType); throw new UnsupportedOperationException("Unsupport switch type:" + switchType);
} }
/**
 * Create a protocol buffer AddReplicationPeerRequest.
 * @param peerId a short name that identifies the peer
 * @param peerConfig configuration for the replication slave cluster
 * @return an AddReplicationPeerRequest carrying the peer id and its serialized config
 */
public static ReplicationProtos.AddReplicationPeerRequest buildAddReplicationPeerRequest(
    String peerId, ReplicationPeerConfig peerConfig) {
  // Single chained builder expression; the peer config is converted to its protobuf form.
  return AddReplicationPeerRequest.newBuilder()
      .setPeerId(peerId)
      .setPeerConfig(ReplicationSerDeHelper.convert(peerConfig))
      .build();
}
/**
 * Create a protocol buffer RemoveReplicationPeerRequest.
 * @param peerId a short name that identifies the peer
 * @return a RemoveReplicationPeerRequest carrying the peer id
 */
public static ReplicationProtos.RemoveReplicationPeerRequest buildRemoveReplicationPeerRequest(
    String peerId) {
  // Single chained builder expression; only the peer id is needed for removal.
  return RemoveReplicationPeerRequest.newBuilder()
      .setPeerId(peerId)
      .build();
}
} }

View File

@ -66344,6 +66344,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done); org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
/**
* <pre>
** Add a replication peer
* </pre>
*
* <code>rpc AddReplicationPeer(.hbase.pb.AddReplicationPeerRequest) returns (.hbase.pb.AddReplicationPeerResponse);</code>
*/
public abstract void addReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done);
/**
* <pre>
** Remove a replication peer
* </pre>
*
* <code>rpc RemoveReplicationPeer(.hbase.pb.RemoveReplicationPeerRequest) returns (.hbase.pb.RemoveReplicationPeerResponse);</code>
*/
public abstract void removeReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done);
} }
public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService( public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService(
@ -66813,6 +66837,22 @@ public final class MasterProtos {
impl.listProcedures(controller, request, done); impl.listProcedures(controller, request, done);
} }
// Generated reflective-service adapter: forwards the AddReplicationPeer RPC
// to the supplied service implementation. Do not edit by hand.
@java.lang.Override
public void addReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done) {
impl.addReplicationPeer(controller, request, done);
}
// Generated reflective-service adapter: forwards the RemoveReplicationPeer RPC
// to the supplied service implementation. Do not edit by hand.
@java.lang.Override
public void removeReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done) {
impl.removeReplicationPeer(controller, request, done);
}
}; };
} }
@ -66951,6 +66991,10 @@ public final class MasterProtos {
return impl.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request); return impl.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
case 57: case 57:
return impl.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request); return impl.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request);
case 58:
return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request);
case 59:
return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request);
default: default:
throw new java.lang.AssertionError("Can't get here."); throw new java.lang.AssertionError("Can't get here.");
} }
@ -67081,6 +67125,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
case 57: case 57:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
case 58:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
case 59:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
default: default:
throw new java.lang.AssertionError("Can't get here."); throw new java.lang.AssertionError("Can't get here.");
} }
@ -67211,6 +67259,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
case 57: case 57:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
case 58:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
case 59:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
default: default:
throw new java.lang.AssertionError("Can't get here."); throw new java.lang.AssertionError("Can't get here.");
} }
@ -67944,6 +67996,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done); org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
/**
* <pre>
** Add a replication peer
* </pre>
*
* <code>rpc AddReplicationPeer(.hbase.pb.AddReplicationPeerRequest) returns (.hbase.pb.AddReplicationPeerResponse);</code>
*/
public abstract void addReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done);
/**
* <pre>
** Remove a replication peer
* </pre>
*
* <code>rpc RemoveReplicationPeer(.hbase.pb.RemoveReplicationPeerRequest) returns (.hbase.pb.RemoveReplicationPeerResponse);</code>
*/
public abstract void removeReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done);
public static final public static final
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() { getDescriptor() {
@ -68256,6 +68332,16 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse>specializeCallback( org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse>specializeCallback(
done)); done));
return; return;
case 58:
this.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse>specializeCallback(
done));
return;
case 59:
this.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse>specializeCallback(
done));
return;
default: default:
throw new java.lang.AssertionError("Can't get here."); throw new java.lang.AssertionError("Can't get here.");
} }
@ -68386,6 +68472,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
case 57: case 57:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
case 58:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
case 59:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
default: default:
throw new java.lang.AssertionError("Can't get here."); throw new java.lang.AssertionError("Can't get here.");
} }
@ -68516,6 +68606,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
case 57: case 57:
return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
case 58:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
case 59:
return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
default: default:
throw new java.lang.AssertionError("Can't get here."); throw new java.lang.AssertionError("Can't get here.");
} }
@ -69406,6 +69500,36 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.class,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance())); org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()));
} }
// Generated async stub method: dispatches AddReplicationPeer over the RPC channel
// using service method index 58 and generalizes the typed callback. Do not edit by hand.
public void addReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done) {
channel.callMethod(
getDescriptor().getMethods().get(58),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance(),
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.class,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance()));
}
// Generated async stub method: dispatches RemoveReplicationPeer over the RPC channel
// using service method index 59 and generalizes the typed callback. Do not edit by hand.
public void removeReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done) {
channel.callMethod(
getDescriptor().getMethods().get(59),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance(),
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.class,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance()));
}
} }
public static BlockingInterface newBlockingStub( public static BlockingInterface newBlockingStub(
@ -69703,6 +69827,16 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request) org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse removeReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
} }
private static final class BlockingStub implements BlockingInterface { private static final class BlockingStub implements BlockingInterface {
@ -70407,6 +70541,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance());
} }
// Generated blocking stub method: synchronous AddReplicationPeer call via
// channel.callBlockingMethod at service method index 58. Do not edit by hand.
public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) channel.callBlockingMethod(
getDescriptor().getMethods().get(58),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance());
}
// Generated blocking stub method: synchronous RemoveReplicationPeer call via
// channel.callBlockingMethod at service method index 59. Do not edit by hand.
public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse removeReplicationPeer(
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) channel.callBlockingMethod(
getDescriptor().getMethods().get(59),
controller,
request,
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance());
}
} }
// @@protoc_insertion_point(class_scope:hbase.pb.MasterService) // @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
@ -70989,340 +71147,346 @@ public final class MasterProtos {
"\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014C" + "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014C" +
"lient.proto\032\023ClusterStatus.proto\032\023ErrorH" + "lient.proto\032\023ClusterStatus.proto\032\023ErrorH" +
"andling.proto\032\017Procedure.proto\032\013Quota.pr" + "andling.proto\032\017Procedure.proto\032\013Quota.pr" +
"oto\"\234\001\n\020AddColumnRequest\022\'\n\ntable_name\030\001" + "oto\032\021Replication.proto\"\234\001\n\020AddColumnRequ" +
" \002(\0132\023.hbase.pb.TableName\0225\n\017column_fami" + "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" +
"lies\030\002 \002(\0132\034.hbase.pb.ColumnFamilySchema" + "Name\0225\n\017column_families\030\002 \002(\0132\034.hbase.pb" +
"\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:" + ".ColumnFamilySchema\022\026\n\013nonce_group\030\003 \001(\004" +
"\0010\"$\n\021AddColumnResponse\022\017\n\007proc_id\030\001 \001(\004" + ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"$\n\021AddColumnRespon" +
"\"}\n\023DeleteColumnRequest\022\'\n\ntable_name\030\001 " + "se\022\017\n\007proc_id\030\001 \001(\004\"}\n\023DeleteColumnReque" +
"\002(\0132\023.hbase.pb.TableName\022\023\n\013column_name\030", "st\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableN",
"\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" + "ame\022\023\n\013column_name\030\002 \002(\014\022\026\n\013nonce_group\030" +
" \001(\004:\0010\"\'\n\024DeleteColumnResponse\022\017\n\007proc_" + "\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024DeleteColu" +
"id\030\001 \001(\004\"\237\001\n\023ModifyColumnRequest\022\'\n\ntabl" + "mnResponse\022\017\n\007proc_id\030\001 \001(\004\"\237\001\n\023ModifyCo" +
"e_name\030\001 \002(\0132\023.hbase.pb.TableName\0225\n\017col" + "lumnRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase." +
"umn_families\030\002 \002(\0132\034.hbase.pb.ColumnFami" + "pb.TableName\0225\n\017column_families\030\002 \002(\0132\034." +
"lySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonc" + "hbase.pb.ColumnFamilySchema\022\026\n\013nonce_gro" +
"e\030\004 \001(\004:\0010\"\'\n\024ModifyColumnResponse\022\017\n\007pr" + "up\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024ModifyC" +
"oc_id\030\001 \001(\004\"n\n\021MoveRegionRequest\022)\n\006regi" + "olumnResponse\022\017\n\007proc_id\030\001 \001(\004\"n\n\021MoveRe" +
"on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\020d" + "gionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" +
"est_server_name\030\002 \001(\0132\024.hbase.pb.ServerN", "egionSpecifier\022.\n\020dest_server_name\030\002 \001(\013",
"ame\"\024\n\022MoveRegionResponse\"\274\001\n\035DispatchMe" + "2\024.hbase.pb.ServerName\"\024\n\022MoveRegionResp" +
"rgingRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031." + "onse\"\274\001\n\035DispatchMergingRegionsRequest\022+" +
"hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 \002" + "\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpecif" +
"(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcibl" + "ier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionS" +
"e\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020" + "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\026\n\013non" +
"\n\005nonce\030\005 \001(\004:\0010\"1\n\036DispatchMergingRegio" + "ce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\"1\n\036D" +
"nsResponse\022\017\n\007proc_id\030\001 \001(\004\"\210\001\n\030MergeTab" + "ispatchMergingRegionsResponse\022\017\n\007proc_id" +
"leRegionsRequest\022)\n\006region\030\001 \003(\0132\031.hbase" + "\030\001 \001(\004\"\210\001\n\030MergeTableRegionsRequest\022)\n\006r" +
".pb.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005f" + "egion\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\022\027" +
"alse\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 ", "\n\010forcible\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004",
"\001(\004:\0010\",\n\031MergeTableRegionsResponse\022\017\n\007p" + " \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\",\n\031MergeTableR" +
"roc_id\030\001 \001(\004\"@\n\023AssignRegionRequest\022)\n\006r" + "egionsResponse\022\017\n\007proc_id\030\001 \001(\004\"@\n\023Assig" +
"egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"\026" + "nRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.p" +
"\n\024AssignRegionResponse\"X\n\025UnassignRegion" + "b.RegionSpecifier\"\026\n\024AssignRegionRespons" +
"Request\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regio" + "e\"X\n\025UnassignRegionRequest\022)\n\006region\030\001 \002" +
"nSpecifier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026Unas" + "(\0132\031.hbase.pb.RegionSpecifier\022\024\n\005force\030\002" +
"signRegionResponse\"A\n\024OfflineRegionReque" + " \001(\010:\005false\"\030\n\026UnassignRegionResponse\"A\n" +
"st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" + "\024OfflineRegionRequest\022)\n\006region\030\001 \002(\0132\031." +
"ifier\"\027\n\025OfflineRegionResponse\"\177\n\022Create" + "hbase.pb.RegionSpecifier\"\027\n\025OfflineRegio" +
"TableRequest\022+\n\014table_schema\030\001 \002(\0132\025.hba", "nResponse\"\177\n\022CreateTableRequest\022+\n\014table",
"se.pb.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\022\026\n" + "_schema\030\001 \002(\0132\025.hbase.pb.TableSchema\022\022\n\n" +
"\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"" + "split_keys\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010" +
"&\n\023CreateTableResponse\022\017\n\007proc_id\030\001 \001(\004\"" + "\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023CreateTableRespons" +
"g\n\022DeleteTableRequest\022\'\n\ntable_name\030\001 \002(" + "e\022\017\n\007proc_id\030\001 \001(\004\"g\n\022DeleteTableRequest" +
"\0132\023.hbase.pb.TableName\022\026\n\013nonce_group\030\002 " + "\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNam" +
"\001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023DeleteTableR" + "e\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004" +
"esponse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024TruncateTab" + ":\0010\"&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001 " +
"leRequest\022&\n\ttableName\030\001 \002(\0132\023.hbase.pb." + "\001(\004\"\207\001\n\024TruncateTableRequest\022&\n\ttableNam" +
"TableName\022\035\n\016preserveSplits\030\002 \001(\010:\005false" + "e\030\001 \002(\0132\023.hbase.pb.TableName\022\035\n\016preserve" +
"\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:", "Splits\030\002 \001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004",
"\0010\"(\n\025TruncateTableResponse\022\017\n\007proc_id\030\001" + ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025TruncateTableRe" +
" \001(\004\"g\n\022EnableTableRequest\022\'\n\ntable_name" + "sponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022EnableTableRe" +
"\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonce_gro" + "quest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Tab" +
"up\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023EnableT" + "leName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" +
"ableResponse\022\017\n\007proc_id\030\001 \001(\004\"h\n\023Disable" + "\003 \001(\004:\0010\"&\n\023EnableTableResponse\022\017\n\007proc_" +
"TableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase" + "id\030\001 \001(\004\"h\n\023DisableTableRequest\022\'\n\ntable" +
".pb.TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n" + "_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonc" +
"\005nonce\030\003 \001(\004:\0010\"\'\n\024DisableTableResponse\022" + "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024Di" +
"\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022ModifyTableRequest\022" + "sableTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022" +
"\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName", "ModifyTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023",
"\022+\n\014table_schema\030\002 \002(\0132\025.hbase.pb.TableS" + ".hbase.pb.TableName\022+\n\014table_schema\030\002 \002(" +
"chema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" + "\0132\025.hbase.pb.TableSchema\022\026\n\013nonce_group\030" +
" \001(\004:\0010\"&\n\023ModifyTableResponse\022\017\n\007proc_i" + "\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTabl" +
"d\030\001 \001(\004\"~\n\026CreateNamespaceRequest\022:\n\023nam" + "eResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateName" +
"espaceDescriptor\030\001 \002(\0132\035.hbase.pb.Namesp" + "spaceRequest\022:\n\023namespaceDescriptor\030\001 \002(" +
"aceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n" + "\0132\035.hbase.pb.NamespaceDescriptor\022\026\n\013nonc" +
"\005nonce\030\003 \001(\004:\0010\"*\n\027CreateNamespaceRespon" + "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Cr" +
"se\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteNamespaceRe" + "eateNamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y" +
"quest\022\025\n\rnamespaceName\030\001 \002(\t\022\026\n\013nonce_gr" + "\n\026DeleteNamespaceRequest\022\025\n\rnamespaceNam" +
"oup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Delete", "e\030\001 \002(\t\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce",
"NamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026Mo" + "\030\003 \001(\004:\0010\"*\n\027DeleteNamespaceResponse\022\017\n\007" +
"difyNamespaceRequest\022:\n\023namespaceDescrip" + "proc_id\030\001 \001(\004\"~\n\026ModifyNamespaceRequest\022" +
"tor\030\001 \002(\0132\035.hbase.pb.NamespaceDescriptor" + ":\n\023namespaceDescriptor\030\001 \002(\0132\035.hbase.pb." +
"\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:" + "NamespaceDescriptor\022\026\n\013nonce_group\030\002 \001(\004" +
"\0010\"*\n\027ModifyNamespaceResponse\022\017\n\007proc_id" + ":\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027ModifyNamespace" +
"\030\001 \001(\004\"6\n\035GetNamespaceDescriptorRequest\022" + "Response\022\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespac" +
"\025\n\rnamespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDe" + "eDescriptorRequest\022\025\n\rnamespaceName\030\001 \002(" +
"scriptorResponse\022:\n\023namespaceDescriptor\030" + "\t\"\\\n\036GetNamespaceDescriptorResponse\022:\n\023n" +
"\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037" + "amespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Name" +
"ListNamespaceDescriptorsRequest\"^\n ListN", "spaceDescriptor\"!\n\037ListNamespaceDescript",
"amespaceDescriptorsResponse\022:\n\023namespace" + "orsRequest\"^\n ListNamespaceDescriptorsRe" +
"Descriptor\030\001 \003(\0132\035.hbase.pb.NamespaceDes" + "sponse\022:\n\023namespaceDescriptor\030\001 \003(\0132\035.hb" +
"criptor\"?\n&ListTableDescriptorsByNamespa" + "ase.pb.NamespaceDescriptor\"?\n&ListTableD" +
"ceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'List" + "escriptorsByNamespaceRequest\022\025\n\rnamespac" +
"TableDescriptorsByNamespaceResponse\022*\n\013t" + "eName\030\001 \002(\t\"U\n\'ListTableDescriptorsByNam" +
"ableSchema\030\001 \003(\0132\025.hbase.pb.TableSchema\"" + "espaceResponse\022*\n\013tableSchema\030\001 \003(\0132\025.hb" +
"9\n ListTableNamesByNamespaceRequest\022\025\n\rn" + "ase.pb.TableSchema\"9\n ListTableNamesByNa" +
"amespaceName\030\001 \002(\t\"K\n!ListTableNamesByNa" + "mespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n" +
"mespaceResponse\022&\n\ttableName\030\001 \003(\0132\023.hba" + "!ListTableNamesByNamespaceResponse\022&\n\tta" +
"se.pb.TableName\"\021\n\017ShutdownRequest\"\022\n\020Sh", "bleName\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Sh",
"utdownResponse\"\023\n\021StopMasterRequest\"\024\n\022S" + "utdownRequest\"\022\n\020ShutdownResponse\"\023\n\021Sto" +
"topMasterResponse\"\034\n\032IsInMaintenanceMode" + "pMasterRequest\"\024\n\022StopMasterResponse\"\034\n\032" +
"Request\"8\n\033IsInMaintenanceModeResponse\022\031" + "IsInMaintenanceModeRequest\"8\n\033IsInMainte" +
"\n\021inMaintenanceMode\030\001 \002(\010\"\037\n\016BalanceRequ" + "nanceModeResponse\022\031\n\021inMaintenanceMode\030\001" +
"est\022\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n" + " \002(\010\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n" +
"\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunnin" + "\017BalanceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<" +
"gRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(" + "\n\031SetBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022" +
"\010\"8\n\032SetBalancerRunningResponse\022\032\n\022prev_" + "\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBalancerRunni" +
"balance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabled" + "ngResponse\022\032\n\022prev_balance_value\030\001 \001(\010\"\032" +
"Request\",\n\031IsBalancerEnabledResponse\022\017\n\007", "\n\030IsBalancerEnabledRequest\",\n\031IsBalancer",
"enabled\030\001 \002(\010\"w\n\035SetSplitOrMergeEnabledR" + "EnabledResponse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetS" +
"equest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002" + "plitOrMergeEnabledRequest\022\017\n\007enabled\030\001 \002" +
" \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb.Ma" + "(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014switch_types\030" +
"sterSwitchType\"4\n\036SetSplitOrMergeEnabled" + "\003 \003(\0162\032.hbase.pb.MasterSwitchType\"4\n\036Set" +
"Response\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSplitO" + "SplitOrMergeEnabledResponse\022\022\n\nprev_valu" +
"rMergeEnabledRequest\022/\n\013switch_type\030\001 \002(" + "e\030\001 \003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022" +
"\0162\032.hbase.pb.MasterSwitchType\"0\n\035IsSplit" + "/\n\013switch_type\030\001 \002(\0162\032.hbase.pb.MasterSw" +
"OrMergeEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"" + "itchType\"0\n\035IsSplitOrMergeEnabledRespons" +
"\022\n\020NormalizeRequest\"+\n\021NormalizeResponse" + "e\022\017\n\007enabled\030\001 \002(\010\"\022\n\020NormalizeRequest\"+" +
"\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormalize", "\n\021NormalizeResponse\022\026\n\016normalizer_ran\030\001 ",
"rRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormal" + "\002(\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on" +
"izerRunningResponse\022\035\n\025prev_normalizer_v" + "\030\001 \002(\010\"=\n\034SetNormalizerRunningResponse\022\035" +
"alue\030\001 \001(\010\"\034\n\032IsNormalizerEnabledRequest" + "\n\025prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNorma" +
"\".\n\033IsNormalizerEnabledResponse\022\017\n\007enabl" + "lizerEnabledRequest\".\n\033IsNormalizerEnabl" +
"ed\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026Run" + "edResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalo" +
"CatalogScanResponse\022\023\n\013scan_result\030\001 \001(\005" + "gScanRequest\"-\n\026RunCatalogScanResponse\022\023" +
"\"-\n\033EnableCatalogJanitorRequest\022\016\n\006enabl" + "\n\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJani" +
"e\030\001 \002(\010\"2\n\034EnableCatalogJanitorResponse\022" + "torRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCata" +
"\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanitorE" + "logJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" " +
"nabledRequest\"0\n\037IsCatalogJanitorEnabled", "\n\036IsCatalogJanitorEnabledRequest\"0\n\037IsCa",
"Response\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotReque" + "talogJanitorEnabledResponse\022\r\n\005value\030\001 \002" +
"st\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapshot" + "(\010\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132" +
"Description\",\n\020SnapshotResponse\022\030\n\020expec" + "\035.hbase.pb.SnapshotDescription\",\n\020Snapsh" +
"ted_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapsho" + "otResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034" +
"tsRequest\"Q\n\035GetCompletedSnapshotsRespon" + "GetCompletedSnapshotsRequest\"Q\n\035GetCompl" +
"se\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.Snapsho" + "etedSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\013" +
"tDescription\"H\n\025DeleteSnapshotRequest\022/\n" + "2\035.hbase.pb.SnapshotDescription\"H\n\025Delet" +
"eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" +
"se.pb.SnapshotDescription\"\030\n\026DeleteSnaps" +
"hotResponse\"s\n\026RestoreSnapshotRequest\022/\n",
"\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" + "\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" +
"iption\"\030\n\026DeleteSnapshotResponse\"s\n\026Rest" + "iption\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" +
"oreSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.h", "\003 \001(\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007p" +
"base.pb.SnapshotDescription\022\026\n\013nonce_gro" + "roc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n" +
"up\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Restore" + "\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescr" +
"SnapshotResponse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsS" + "iption\"^\n\026IsSnapshotDoneResponse\022\023\n\004done" +
"napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" + "\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase." +
"base.pb.SnapshotDescription\"^\n\026IsSnapsho" + "pb.SnapshotDescription\"O\n\034IsRestoreSnaps" +
"tDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010sn" + "hotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase" +
"apshot\030\002 \001(\0132\035.hbase.pb.SnapshotDescript" + ".pb.SnapshotDescription\"4\n\035IsRestoreSnap",
"ion\"O\n\034IsRestoreSnapshotDoneRequest\022/\n\010s" + "shotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n" +
"napshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescrip" + "\033GetSchemaAlterStatusRequest\022\'\n\ntable_na" +
"tion\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n", "me\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034GetSche" +
"\004done\030\001 \001(\010:\005false\"F\n\033GetSchemaAlterStat" + "maAlterStatusResponse\022\035\n\025yet_to_update_r" +
"usRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" + "egions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032" +
".TableName\"T\n\034GetSchemaAlterStatusRespon" + "GetTableDescriptorsRequest\022(\n\013table_name" +
"se\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtot" + "s\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 " +
"al_regions\030\002 \001(\r\"\213\001\n\032GetTableDescriptors" + "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" +
"Request\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb." + "\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescriptors" +
"TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_" + "Response\022+\n\014table_schema\030\001 \003(\0132\025.hbase.p",
"tables\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J" + "b.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" +
"\n\033GetTableDescriptorsResponse\022+\n\014table_s" + "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" +
"chema\030\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024Ge", ":\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNa" +
"tTableNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022inc" + "mesResponse\022(\n\013table_names\030\001 \003(\0132\023.hbase" +
"lude_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespac" + ".pb.TableName\"?\n\024GetTableStateRequest\022\'\n" +
"e\030\003 \001(\t\"A\n\025GetTableNamesResponse\022(\n\013tabl" + "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"B" +
"e_names\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024Ge" + "\n\025GetTableStateResponse\022)\n\013table_state\030\001" +
"tTableStateRequest\022\'\n\ntable_name\030\001 \002(\0132\023" + " \002(\0132\024.hbase.pb.TableState\"\031\n\027GetCluster" +
".hbase.pb.TableName\"B\n\025GetTableStateResp" + "StatusRequest\"K\n\030GetClusterStatusRespons" +
"onse\022)\n\013table_state\030\001 \002(\0132\024.hbase.pb.Tab" + "e\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clu",
"leState\"\031\n\027GetClusterStatusRequest\"K\n\030Ge" + "sterStatus\"\030\n\026IsMasterRunningRequest\"4\n\027" +
"tClusterStatusResponse\022/\n\016cluster_status" + "IsMasterRunningResponse\022\031\n\021is_master_run" +
"\030\001 \002(\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMas", "ning\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tpr" +
"terRunningRequest\"4\n\027IsMasterRunningResp" + "ocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDescri" +
"onse\022\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecP" + "ption\"F\n\025ExecProcedureResponse\022\030\n\020expect" +
"rocedureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hba" + "ed_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n" +
"se.pb.ProcedureDescription\"F\n\025ExecProced" + "\026IsProcedureDoneRequest\0221\n\tprocedure\030\001 \001" +
"ureResponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n" + "(\0132\036.hbase.pb.ProcedureDescription\"`\n\027Is" +
"\013return_data\030\002 \001(\014\"K\n\026IsProcedureDoneReq" + "ProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005fal" +
"uest\0221\n\tprocedure\030\001 \001(\0132\036.hbase.pb.Proce" + "se\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Procedur",
"dureDescription\"`\n\027IsProcedureDoneRespon" + "eDescription\",\n\031GetProcedureResultReques" +
"se\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(" + "t\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResul" +
"\0132\036.hbase.pb.ProcedureDescription\",\n\031Get", "tResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetP" +
"ProcedureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"" + "rocedureResultResponse.State\022\022\n\nstart_ti" +
"\371\001\n\032GetProcedureResultResponse\0229\n\005state\030" + "me\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030" +
"\001 \002(\0162*.hbase.pb.GetProcedureResultRespo" + "\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Fore" +
"nse.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_up" + "ignExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUN" +
"date\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030" + "D\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortP" +
"\005 \001(\0132!.hbase.pb.ForeignExceptionMessage" + "rocedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayI" +
"\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n" + "nterruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortPr",
"\010FINISHED\020\002\"M\n\025AbortProcedureRequest\022\017\n\007" + "ocedureResponse\022\034\n\024is_procedure_aborted\030" +
"proc_id\030\001 \002(\004\022#\n\025mayInterruptIfRunning\030\002" + "\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListPr" +
" \001(\010:\004true\"6\n\026AbortProcedureResponse\022\034\n\024", "oceduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hb" +
"is_procedure_aborted\030\001 \002(\010\"\027\n\025ListProced" + "ase.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\t" +
"uresRequest\"@\n\026ListProceduresResponse\022&\n" + "user_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tn" +
"\tprocedure\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001" + "amespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hba" +
"\n\017SetQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\n" + "se.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016b" +
"user_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\nt" + "ypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031." +
"able_name\030\004 \001(\0132\023.hbase.pb.TableName\022\022\n\n" + "hbase.pb.ThrottleRequest\"\022\n\020SetQuotaResp" +
"remove_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010" + "onse\"J\n\037MajorCompactionTimestampRequest\022",
"\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.ThrottleRe" + "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName" +
"quest\"\022\n\020SetQuotaResponse\"J\n\037MajorCompac" + "\"U\n(MajorCompactionTimestampForRegionReq" +
"tionTimestampRequest\022\'\n\ntable_name\030\001 \002(\013", "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
"2\023.hbase.pb.TableName\"U\n(MajorCompaction" + "ecifier\"@\n MajorCompactionTimestampRespo" +
"TimestampForRegionRequest\022)\n\006region\030\001 \002(" + "nse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Sec" +
"\0132\031.hbase.pb.RegionSpecifier\"@\n MajorCom" + "urityCapabilitiesRequest\"\354\001\n\034SecurityCap" +
"pactionTimestampResponse\022\034\n\024compaction_t" + "abilitiesResponse\022G\n\014capabilities\030\001 \003(\0162" +
"imestamp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRe" + "1.hbase.pb.SecurityCapabilitiesResponse." +
"quest\"\354\001\n\034SecurityCapabilitiesResponse\022G" + "Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTH" +
"\n\014capabilities\030\001 \003(\01621.hbase.pb.Security" + "ENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022",
"CapabilitiesResponse.Capability\"\202\001\n\nCapa" + "\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION" +
"bility\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SEC" + "\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchT" +
"URE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022", "ype\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\374*\n\rMasterServ" +
"\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILI" + "ice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb.G" +
"TY\020\004*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005M" + "etSchemaAlterStatusRequest\032&.hbase.pb.Ge" +
"ERGE\020\0012\261)\n\rMasterService\022e\n\024GetSchemaAlt" + "tSchemaAlterStatusResponse\022b\n\023GetTableDe" +
"erStatus\022%.hbase.pb.GetSchemaAlterStatus" + "scriptors\022$.hbase.pb.GetTableDescriptors" +
"Request\032&.hbase.pb.GetSchemaAlterStatusR" + "Request\032%.hbase.pb.GetTableDescriptorsRe" +
"esponse\022b\n\023GetTableDescriptors\022$.hbase.p" + "sponse\022P\n\rGetTableNames\022\036.hbase.pb.GetTa" +
"b.GetTableDescriptorsRequest\032%.hbase.pb." + "bleNamesRequest\032\037.hbase.pb.GetTableNames",
"GetTableDescriptorsResponse\022P\n\rGetTableN" + "Response\022Y\n\020GetClusterStatus\022!.hbase.pb." +
"ames\022\036.hbase.pb.GetTableNamesRequest\032\037.h" + "GetClusterStatusRequest\032\".hbase.pb.GetCl" +
"base.pb.GetTableNamesResponse\022Y\n\020GetClus", "usterStatusResponse\022V\n\017IsMasterRunning\022 " +
"terStatus\022!.hbase.pb.GetClusterStatusReq" + ".hbase.pb.IsMasterRunningRequest\032!.hbase" +
"uest\032\".hbase.pb.GetClusterStatusResponse" + ".pb.IsMasterRunningResponse\022D\n\tAddColumn" +
"\022V\n\017IsMasterRunning\022 .hbase.pb.IsMasterR" + "\022\032.hbase.pb.AddColumnRequest\032\033.hbase.pb." +
"unningRequest\032!.hbase.pb.IsMasterRunning" + "AddColumnResponse\022M\n\014DeleteColumn\022\035.hbas" +
"Response\022D\n\tAddColumn\022\032.hbase.pb.AddColu" + "e.pb.DeleteColumnRequest\032\036.hbase.pb.Dele" +
"mnRequest\032\033.hbase.pb.AddColumnResponse\022M" + "teColumnResponse\022M\n\014ModifyColumn\022\035.hbase" +
"\n\014DeleteColumn\022\035.hbase.pb.DeleteColumnRe" + ".pb.ModifyColumnRequest\032\036.hbase.pb.Modif",
"quest\032\036.hbase.pb.DeleteColumnResponse\022M\n" + "yColumnResponse\022G\n\nMoveRegion\022\033.hbase.pb" +
"\014ModifyColumn\022\035.hbase.pb.ModifyColumnReq" + ".MoveRegionRequest\032\034.hbase.pb.MoveRegion" +
"uest\032\036.hbase.pb.ModifyColumnResponse\022G\n\n", "Response\022k\n\026DispatchMergingRegions\022\'.hba" +
"MoveRegion\022\033.hbase.pb.MoveRegionRequest\032" + "se.pb.DispatchMergingRegionsRequest\032(.hb" +
"\034.hbase.pb.MoveRegionResponse\022k\n\026Dispatc" + "ase.pb.DispatchMergingRegionsResponse\022\\\n" +
"hMergingRegions\022\'.hbase.pb.DispatchMergi" + "\021MergeTableRegions\022\".hbase.pb.MergeTable" +
"ngRegionsRequest\032(.hbase.pb.DispatchMerg" + "RegionsRequest\032#.hbase.pb.MergeTableRegi" +
"ingRegionsResponse\022\\\n\021MergeTableRegions\022" + "onsResponse\022M\n\014AssignRegion\022\035.hbase.pb.A" +
"\".hbase.pb.MergeTableRegionsRequest\032#.hb" + "ssignRegionRequest\032\036.hbase.pb.AssignRegi" +
"ase.pb.MergeTableRegionsResponse\022M\n\014Assi" + "onResponse\022S\n\016UnassignRegion\022\037.hbase.pb.",
"gnRegion\022\035.hbase.pb.AssignRegionRequest\032" + "UnassignRegionRequest\032 .hbase.pb.Unassig" +
"\036.hbase.pb.AssignRegionResponse\022S\n\016Unass" + "nRegionResponse\022P\n\rOfflineRegion\022\036.hbase" +
"ignRegion\022\037.hbase.pb.UnassignRegionReque", ".pb.OfflineRegionRequest\032\037.hbase.pb.Offl" +
"st\032 .hbase.pb.UnassignRegionResponse\022P\n\r" + "ineRegionResponse\022J\n\013DeleteTable\022\034.hbase" +
"OfflineRegion\022\036.hbase.pb.OfflineRegionRe" + ".pb.DeleteTableRequest\032\035.hbase.pb.Delete" +
"quest\032\037.hbase.pb.OfflineRegionResponse\022J" + "TableResponse\022P\n\rtruncateTable\022\036.hbase.p" +
"\n\013DeleteTable\022\034.hbase.pb.DeleteTableRequ" + "b.TruncateTableRequest\032\037.hbase.pb.Trunca" +
"est\032\035.hbase.pb.DeleteTableResponse\022P\n\rtr" + "teTableResponse\022J\n\013EnableTable\022\034.hbase.p" +
"uncateTable\022\036.hbase.pb.TruncateTableRequ" + "b.EnableTableRequest\032\035.hbase.pb.EnableTa" +
"est\032\037.hbase.pb.TruncateTableResponse\022J\n\013" + "bleResponse\022M\n\014DisableTable\022\035.hbase.pb.D",
"EnableTable\022\034.hbase.pb.EnableTableReques" + "isableTableRequest\032\036.hbase.pb.DisableTab" +
"t\032\035.hbase.pb.EnableTableResponse\022M\n\014Disa" + "leResponse\022J\n\013ModifyTable\022\034.hbase.pb.Mod" +
"bleTable\022\035.hbase.pb.DisableTableRequest\032", "ifyTableRequest\032\035.hbase.pb.ModifyTableRe" +
"\036.hbase.pb.DisableTableResponse\022J\n\013Modif" + "sponse\022J\n\013CreateTable\022\034.hbase.pb.CreateT" +
"yTable\022\034.hbase.pb.ModifyTableRequest\032\035.h" + "ableRequest\032\035.hbase.pb.CreateTableRespon" +
"base.pb.ModifyTableResponse\022J\n\013CreateTab" + "se\022A\n\010Shutdown\022\031.hbase.pb.ShutdownReques" +
"le\022\034.hbase.pb.CreateTableRequest\032\035.hbase" + "t\032\032.hbase.pb.ShutdownResponse\022G\n\nStopMas" +
".pb.CreateTableResponse\022A\n\010Shutdown\022\031.hb" + "ter\022\033.hbase.pb.StopMasterRequest\032\034.hbase" +
"ase.pb.ShutdownRequest\032\032.hbase.pb.Shutdo" + ".pb.StopMasterResponse\022h\n\031IsMasterInMain" +
"wnResponse\022G\n\nStopMaster\022\033.hbase.pb.Stop" + "tenanceMode\022$.hbase.pb.IsInMaintenanceMo",
"MasterRequest\032\034.hbase.pb.StopMasterRespo" + "deRequest\032%.hbase.pb.IsInMaintenanceMode" +
"nse\022h\n\031IsMasterInMaintenanceMode\022$.hbase" + "Response\022>\n\007Balance\022\030.hbase.pb.BalanceRe" +
".pb.IsInMaintenanceModeRequest\032%.hbase.p", "quest\032\031.hbase.pb.BalanceResponse\022_\n\022SetB" +
"b.IsInMaintenanceModeResponse\022>\n\007Balance" + "alancerRunning\022#.hbase.pb.SetBalancerRun" +
"\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" + "ningRequest\032$.hbase.pb.SetBalancerRunnin" +
"lanceResponse\022_\n\022SetBalancerRunning\022#.hb" + "gResponse\022\\\n\021IsBalancerEnabled\022\".hbase.p" +
"ase.pb.SetBalancerRunningRequest\032$.hbase" + "b.IsBalancerEnabledRequest\032#.hbase.pb.Is" +
".pb.SetBalancerRunningResponse\022\\\n\021IsBala" + "BalancerEnabledResponse\022k\n\026SetSplitOrMer" +
"ncerEnabled\022\".hbase.pb.IsBalancerEnabled" + "geEnabled\022\'.hbase.pb.SetSplitOrMergeEnab" +
"Request\032#.hbase.pb.IsBalancerEnabledResp" + "ledRequest\032(.hbase.pb.SetSplitOrMergeEna",
"onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p" + "bledResponse\022h\n\025IsSplitOrMergeEnabled\022&." +
"b.SetSplitOrMergeEnabledRequest\032(.hbase." + "hbase.pb.IsSplitOrMergeEnabledRequest\032\'." +
"pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS", "hbase.pb.IsSplitOrMergeEnabledResponse\022D" +
"plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM" + "\n\tNormalize\022\032.hbase.pb.NormalizeRequest\032" +
"ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" + "\033.hbase.pb.NormalizeResponse\022e\n\024SetNorma" +
"ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" + "lizerRunning\022%.hbase.pb.SetNormalizerRun" +
".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" + "ningRequest\032&.hbase.pb.SetNormalizerRunn" +
"eResponse\022e\n\024SetNormalizerRunning\022%.hbas" + "ingResponse\022b\n\023IsNormalizerEnabled\022$.hba" +
"e.pb.SetNormalizerRunningRequest\032&.hbase" + "se.pb.IsNormalizerEnabledRequest\032%.hbase" +
".pb.SetNormalizerRunningResponse\022b\n\023IsNo" + ".pb.IsNormalizerEnabledResponse\022S\n\016RunCa",
"rmalizerEnabled\022$.hbase.pb.IsNormalizerE" + "talogScan\022\037.hbase.pb.RunCatalogScanReque" +
"nabledRequest\032%.hbase.pb.IsNormalizerEna" + "st\032 .hbase.pb.RunCatalogScanResponse\022e\n\024" +
"bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p", "EnableCatalogJanitor\022%.hbase.pb.EnableCa" +
"b.RunCatalogScanRequest\032 .hbase.pb.RunCa" + "talogJanitorRequest\032&.hbase.pb.EnableCat" +
"talogScanResponse\022e\n\024EnableCatalogJanito" + "alogJanitorResponse\022n\n\027IsCatalogJanitorE" +
"r\022%.hbase.pb.EnableCatalogJanitorRequest" + "nabled\022(.hbase.pb.IsCatalogJanitorEnable" +
"\032&.hbase.pb.EnableCatalogJanitorResponse" + "dRequest\032).hbase.pb.IsCatalogJanitorEnab" +
"\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" + "ledResponse\022^\n\021ExecMasterService\022#.hbase" +
"sCatalogJanitorEnabledRequest\032).hbase.pb" + ".pb.CoprocessorServiceRequest\032$.hbase.pb" +
".IsCatalogJanitorEnabledResponse\022^\n\021Exec" + ".CoprocessorServiceResponse\022A\n\010Snapshot\022",
"MasterService\022#.hbase.pb.CoprocessorServ" + "\031.hbase.pb.SnapshotRequest\032\032.hbase.pb.Sn" +
"iceRequest\032$.hbase.pb.CoprocessorService" + "apshotResponse\022h\n\025GetCompletedSnapshots\022" +
"Response\022A\n\010Snapshot\022\031.hbase.pb.Snapshot", "&.hbase.pb.GetCompletedSnapshotsRequest\032" +
"Request\032\032.hbase.pb.SnapshotResponse\022h\n\025G" + "\'.hbase.pb.GetCompletedSnapshotsResponse" +
"etCompletedSnapshots\022&.hbase.pb.GetCompl" + "\022S\n\016DeleteSnapshot\022\037.hbase.pb.DeleteSnap" +
"etedSnapshotsRequest\032\'.hbase.pb.GetCompl" + "shotRequest\032 .hbase.pb.DeleteSnapshotRes" +
"etedSnapshotsResponse\022S\n\016DeleteSnapshot\022" + "ponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.IsSna" +
"\037.hbase.pb.DeleteSnapshotRequest\032 .hbase" + "pshotDoneRequest\032 .hbase.pb.IsSnapshotDo" +
".pb.DeleteSnapshotResponse\022S\n\016IsSnapshot" + "neResponse\022V\n\017RestoreSnapshot\022 .hbase.pb" +
"Done\022\037.hbase.pb.IsSnapshotDoneRequest\032 ." + ".RestoreSnapshotRequest\032!.hbase.pb.Resto",
"hbase.pb.IsSnapshotDoneResponse\022V\n\017Resto" + "reSnapshotResponse\022P\n\rExecProcedure\022\036.hb" +
"reSnapshot\022 .hbase.pb.RestoreSnapshotReq" + "ase.pb.ExecProcedureRequest\032\037.hbase.pb.E" +
"uest\032!.hbase.pb.RestoreSnapshotResponse\022", "xecProcedureResponse\022W\n\024ExecProcedureWit" +
"P\n\rExecProcedure\022\036.hbase.pb.ExecProcedur" + "hRet\022\036.hbase.pb.ExecProcedureRequest\032\037.h" +
"eRequest\032\037.hbase.pb.ExecProcedureRespons" + "base.pb.ExecProcedureResponse\022V\n\017IsProce" +
"e\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exe" + "dureDone\022 .hbase.pb.IsProcedureDoneReque" +
"cProcedureRequest\032\037.hbase.pb.ExecProcedu" + "st\032!.hbase.pb.IsProcedureDoneResponse\022V\n" +
"reResponse\022V\n\017IsProcedureDone\022 .hbase.pb" + "\017ModifyNamespace\022 .hbase.pb.ModifyNamesp" +
".IsProcedureDoneRequest\032!.hbase.pb.IsPro" + "aceRequest\032!.hbase.pb.ModifyNamespaceRes" +
"cedureDoneResponse\022V\n\017ModifyNamespace\022 ." + "ponse\022V\n\017CreateNamespace\022 .hbase.pb.Crea",
"hbase.pb.ModifyNamespaceRequest\032!.hbase." + "teNamespaceRequest\032!.hbase.pb.CreateName" +
"pb.ModifyNamespaceResponse\022V\n\017CreateName" + "spaceResponse\022V\n\017DeleteNamespace\022 .hbase" +
"space\022 .hbase.pb.CreateNamespaceRequest\032", ".pb.DeleteNamespaceRequest\032!.hbase.pb.De" +
"!.hbase.pb.CreateNamespaceResponse\022V\n\017De" + "leteNamespaceResponse\022k\n\026GetNamespaceDes" +
"leteNamespace\022 .hbase.pb.DeleteNamespace" + "criptor\022\'.hbase.pb.GetNamespaceDescripto" +
"Request\032!.hbase.pb.DeleteNamespaceRespon" + "rRequest\032(.hbase.pb.GetNamespaceDescript" +
"se\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb." + "orResponse\022q\n\030ListNamespaceDescriptors\022)" +
"GetNamespaceDescriptorRequest\032(.hbase.pb" + ".hbase.pb.ListNamespaceDescriptorsReques" +
".GetNamespaceDescriptorResponse\022q\n\030ListN" + "t\032*.hbase.pb.ListNamespaceDescriptorsRes" +
"amespaceDescriptors\022).hbase.pb.ListNames" + "ponse\022\206\001\n\037ListTableDescriptorsByNamespac",
"paceDescriptorsRequest\032*.hbase.pb.ListNa" + "e\0220.hbase.pb.ListTableDescriptorsByNames" +
"mespaceDescriptorsResponse\022\206\001\n\037ListTable" + "paceRequest\0321.hbase.pb.ListTableDescript" +
"DescriptorsByNamespace\0220.hbase.pb.ListTa", "orsByNamespaceResponse\022t\n\031ListTableNames" +
"bleDescriptorsByNamespaceRequest\0321.hbase" + "ByNamespace\022*.hbase.pb.ListTableNamesByN" +
".pb.ListTableDescriptorsByNamespaceRespo" + "amespaceRequest\032+.hbase.pb.ListTableName" +
"nse\022t\n\031ListTableNamesByNamespace\022*.hbase" + "sByNamespaceResponse\022P\n\rGetTableState\022\036." +
".pb.ListTableNamesByNamespaceRequest\032+.h" + "hbase.pb.GetTableStateRequest\032\037.hbase.pb" +
"base.pb.ListTableNamesByNamespaceRespons" + ".GetTableStateResponse\022A\n\010SetQuota\022\031.hba" +
"e\022P\n\rGetTableState\022\036.hbase.pb.GetTableSt" + "se.pb.SetQuotaRequest\032\032.hbase.pb.SetQuot" +
"ateRequest\032\037.hbase.pb.GetTableStateRespo" + "aResponse\022x\n\037getLastMajorCompactionTimes",
"nse\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReque" + "tamp\022).hbase.pb.MajorCompactionTimestamp" +
"st\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLas" + "Request\032*.hbase.pb.MajorCompactionTimest" +
"tMajorCompactionTimestamp\022).hbase.pb.Maj", "ampResponse\022\212\001\n(getLastMajorCompactionTi" +
"orCompactionTimestampRequest\032*.hbase.pb." + "mestampForRegion\0222.hbase.pb.MajorCompact" +
"MajorCompactionTimestampResponse\022\212\001\n(get" + "ionTimestampForRegionRequest\032*.hbase.pb." +
"LastMajorCompactionTimestampForRegion\0222." + "MajorCompactionTimestampResponse\022_\n\022getP" +
"hbase.pb.MajorCompactionTimestampForRegi" + "rocedureResult\022#.hbase.pb.GetProcedureRe" +
"onRequest\032*.hbase.pb.MajorCompactionTime" + "sultRequest\032$.hbase.pb.GetProcedureResul" +
"stampResponse\022_\n\022getProcedureResult\022#.hb" + "tResponse\022h\n\027getSecurityCapabilities\022%.h" +
"ase.pb.GetProcedureResultRequest\032$.hbase" + "base.pb.SecurityCapabilitiesRequest\032&.hb",
".pb.GetProcedureResultResponse\022h\n\027getSec" + "ase.pb.SecurityCapabilitiesResponse\022S\n\016A" +
"urityCapabilities\022%.hbase.pb.SecurityCap" + "bortProcedure\022\037.hbase.pb.AbortProcedureR" +
"abilitiesRequest\032&.hbase.pb.SecurityCapa", "equest\032 .hbase.pb.AbortProcedureResponse" +
"bilitiesResponse\022S\n\016AbortProcedure\022\037.hba" + "\022S\n\016ListProcedures\022\037.hbase.pb.ListProced" +
"se.pb.AbortProcedureRequest\032 .hbase.pb.A" + "uresRequest\032 .hbase.pb.ListProceduresRes" +
"bortProcedureResponse\022S\n\016ListProcedures\022" + "ponse\022_\n\022AddReplicationPeer\022#.hbase.pb.A" +
"\037.hbase.pb.ListProceduresRequest\032 .hbase" + "ddReplicationPeerRequest\032$.hbase.pb.AddR" +
".pb.ListProceduresResponseBI\n1org.apache" + "eplicationPeerResponse\022h\n\025RemoveReplicat" +
".hadoop.hbase.shaded.protobuf.generatedB" + "ionPeer\022&.hbase.pb.RemoveReplicationPeer" +
"\014MasterProtosH\001\210\001\001\240\001\001" "Request\032\'.hbase.pb.RemoveReplicationPeer",
"ResponseBI\n1org.apache.hadoop.hbase.shad" +
"ed.protobuf.generatedB\014MasterProtosH\001\210\001\001" +
"\240\001\001"
}; };
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
@ -71341,6 +71505,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor(), org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor(),
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor(), org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor(),
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor(), org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor(),
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.getDescriptor(),
}, assigner); }, assigner);
internal_static_hbase_pb_AddColumnRequest_descriptor = internal_static_hbase_pb_AddColumnRequest_descriptor =
getDescriptor().getMessageTypes().get(0); getDescriptor().getMessageTypes().get(0);
@ -72026,6 +72191,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor();
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor();
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor();
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.getDescriptor();
} }
// @@protoc_insertion_point(outer_class_scope) // @@protoc_insertion_point(outer_class_scope)

View File

@ -32,6 +32,7 @@ import "ClusterStatus.proto";
import "ErrorHandling.proto"; import "ErrorHandling.proto";
import "Procedure.proto"; import "Procedure.proto";
import "Quota.proto"; import "Quota.proto";
import "Replication.proto";
/* Column-level protobufs */ /* Column-level protobufs */
@ -846,4 +847,12 @@ service MasterService {
/** returns a list of procedures */ /** returns a list of procedures */
rpc ListProcedures(ListProceduresRequest) rpc ListProcedures(ListProceduresRequest)
returns(ListProceduresResponse); returns(ListProceduresResponse);
/** Add a replication peer */
rpc AddReplicationPeer(AddReplicationPeerRequest)
returns(AddReplicationPeerResponse);
/** Remove a replication peer */
rpc RemoveReplicationPeer(RemoveReplicationPeerRequest)
returns(RemoveReplicationPeerResponse);
} }

View File

@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package hbase.pb;
option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
option java_outer_classname = "ReplicationProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "ZooKeeper.proto";
// Request to register a new replication peer on the master.
message AddReplicationPeerRequest {
  // Short name that identifies the peer.
  required string peer_id = 1;
  // Configuration for the peer (slave) cluster; ReplicationPeer comes from ZooKeeper.proto.
  required ReplicationPeer peer_config = 2;
}

// Empty response: success is signalled by the absence of an RPC error.
message AddReplicationPeerResponse {
}

// Request to remove an existing replication peer and stop replication to it.
message RemoveReplicationPeerRequest {
  // Short name that identifies the peer to remove.
  required string peer_id = 1;
}

// Empty response: success is signalled by the absence of an RPC error.
message RemoveReplicationPeerResponse {
}

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
@ -1827,4 +1828,45 @@ public interface MasterObserver extends Coprocessor {
void postBalanceRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx, void postBalanceRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, boolean balancerRan) throws IOException; String groupName, boolean balancerRan) throws IOException;
/**
 * Called before a new replication peer is added to the master.
 * Observers may veto the operation by throwing an exception.
 * @param ctx the environment to interact with the framework and master
 * @param peerId a short name that identifies the peer
 * @param peerConfig configuration for the replication peer
 * @throws IOException on failure
 */
default void preAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
    String peerId, ReplicationPeerConfig peerConfig) throws IOException {
}
/**
 * Called after a new replication peer has been added to the master.
 * @param ctx the environment to interact with the framework and master
 * @param peerId a short name that identifies the peer
 * @param peerConfig configuration for the replication peer
 * @throws IOException on failure
 */
default void postAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
    String peerId, ReplicationPeerConfig peerConfig) throws IOException {
}
/**
 * Called before a replication peer is removed.
 * Observers may veto the operation by throwing an exception.
 * @param ctx the environment to interact with the framework and master
 * @param peerId a short name that identifies the peer
 * @throws IOException on failure
 */
default void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
    String peerId) throws IOException {
}
/**
 * Called after a replication peer has been removed.
 * @param ctx the environment to interact with the framework and master
 * @param peerId a short name that identifies the peer
 * @throws IOException on failure
 */
default void postRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
    String peerId) throws IOException {
}
} }

View File

@ -119,6 +119,7 @@ import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure; import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure; import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.replication.ReplicationManager;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
@ -138,7 +139,12 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl; import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
import org.apache.hadoop.hbase.replication.master.TableCFsUpdater; import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.replication.regionserver.Replication;
@ -291,6 +297,9 @@ public class HMaster extends HRegionServer implements MasterServices {
// manager of assignment nodes in zookeeper // manager of assignment nodes in zookeeper
private AssignmentManager assignmentManager; private AssignmentManager assignmentManager;
// manager of replication
private ReplicationManager replicationManager;
// buffer for "fatal error" notices from region servers // buffer for "fatal error" notices from region servers
// in the cluster. This is only used for assisting // in the cluster. This is only used for assisting
// operations/debugging. // operations/debugging.
@ -640,6 +649,8 @@ public class HMaster extends HRegionServer implements MasterServices {
this.balancer, this.service, this.metricsMaster, this.balancer, this.service, this.metricsMaster,
this.tableLockManager, tableStateManager); this.tableLockManager, tableStateManager);
this.replicationManager = new ReplicationManager(conf, zooKeeper, this);
this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager); this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);
this.regionServerTracker.start(); this.regionServerTracker.start();
@ -3135,4 +3146,30 @@ public class HMaster extends HRegionServer implements MasterServices {
public FavoredNodesManager getFavoredNodesManager() { public FavoredNodesManager getFavoredNodesManager() {
return favoredNodesManager; return favoredNodesManager;
} }
@Override
public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
    throws ReplicationException, IOException {
  // Coprocessor pre-hook first so observers (e.g. AccessController) can veto the operation.
  if (cpHost != null) {
    cpHost.preAddReplicationPeer(peerId, peerConfig);
  }
  // Audit-log before mutating state so a failure is still attributable to the caller.
  LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config="
      + peerConfig);
  this.replicationManager.addReplicationPeer(peerId, peerConfig);
  // Post-hook only runs on success; an exception above skips it.
  if (cpHost != null) {
    cpHost.postAddReplicationPeer(peerId, peerConfig);
  }
}
@Override
public void removeReplicationPeer(String peerId) throws ReplicationException, IOException {
  // Coprocessor pre-hook first so observers (e.g. AccessController) can veto the operation.
  if (cpHost != null) {
    cpHost.preRemoveReplicationPeer(peerId);
  }
  // Audit-log before mutating state so a failure is still attributable to the caller.
  LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId);
  this.replicationManager.removeReplicationPeer(peerId);
  // Post-hook only runs on success; an exception above skips it.
  if (cpHost != null) {
    cpHost.postRemoveReplicationPeer(peerId);
  }
}
} }

View File

@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.User;
@ -1645,4 +1646,45 @@ public class MasterCoprocessorHost
}); });
} }
/** Invokes the preAddReplicationPeer hook on every loaded MasterObserver. */
public void preAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
    throws IOException {
  // Pass null when no coprocessors are loaded to avoid building the operation object.
  execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    @Override
    public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
        throws IOException {
      observer.preAddReplicationPeer(ctx, peerId, peerConfig);
    }
  });
}
/** Invokes the postAddReplicationPeer hook on every loaded MasterObserver. */
public void postAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
    throws IOException {
  // Pass null when no coprocessors are loaded to avoid building the operation object.
  execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    @Override
    public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
        throws IOException {
      observer.postAddReplicationPeer(ctx, peerId, peerConfig);
    }
  });
}
/** Invokes the preRemoveReplicationPeer hook on every loaded MasterObserver. */
public void preRemoveReplicationPeer(final String peerId) throws IOException {
  // Pass null when no coprocessors are loaded to avoid building the operation object.
  execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    @Override
    public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
        throws IOException {
      observer.preRemoveReplicationPeer(ctx, peerId);
    }
  });
}
/** Invokes the postRemoveReplicationPeer hook on every loaded MasterObserver. */
public void postRemoveReplicationPeer(final String peerId) throws IOException {
  // Pass null when no coprocessors are loaded to avoid building the operation object.
  execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    @Override
    public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
        throws IOException {
      observer.postRemoveReplicationPeer(ctx, peerId);
    }
  });
}
} }

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
@ -86,7 +87,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessController; import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.visibility.VisibilityController; import org.apache.hadoop.hbase.security.visibility.VisibilityController;
@ -1638,4 +1644,27 @@ public class MasterRpcServices extends RSRpcServices
} }
return null; return null;
} }
/**
 * RPC endpoint: registers a new replication peer on this master.
 * Checked failures from the master are re-thrown as ServiceException for the RPC layer.
 */
@Override
public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
    AddReplicationPeerRequest request) throws ServiceException {
  try {
    // Deserialize the protobuf peer config before handing off to the master.
    ReplicationPeerConfig peerConfig = ReplicationSerDeHelper.convert(request.getPeerConfig());
    master.addReplicationPeer(request.getPeerId(), peerConfig);
  } catch (ReplicationException | IOException e) {
    throw new ServiceException(e);
  }
  return AddReplicationPeerResponse.newBuilder().build();
}
/**
 * RPC endpoint: removes an existing replication peer from this master.
 * Checked failures from the master are re-thrown as ServiceException for the RPC layer.
 */
@Override
public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
    RemoveReplicationPeerRequest request) throws ServiceException {
  try {
    master.removeReplicationPeer(request.getPeerId());
  } catch (ReplicationException | IOException e) {
    throw new ServiceException(e);
  }
  return RemoveReplicationPeerResponse.newBuilder().build();
}
} }

View File

@ -39,6 +39,8 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import com.google.protobuf.Service; import com.google.protobuf.Service;
@ -415,4 +417,18 @@ public interface MasterServices extends Server {
* @return Favored Nodes Manager * @return Favored Nodes Manager
*/ */
public FavoredNodesManager getFavoredNodesManager(); public FavoredNodesManager getFavoredNodesManager();
/**
 * Add a new replication peer for replicating data to slave cluster
 * @param peerId a short name that identifies the peer
 * @param peerConfig configuration for the replication slave cluster
 * @throws ReplicationException if registering the peer fails (e.g. duplicate id or config conflict)
 * @throws IOException if a coprocessor hook or other master-side operation fails
 */
void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
    throws ReplicationException, IOException;
/**
 * Removes a peer and stops the replication
 * @param peerId a short name that identifies the peer
 * @throws ReplicationException if unregistering the peer fails
 * @throws IOException if a coprocessor hook or other master-side operation fails
 */
void removeReplicationPeer(String peerId) throws ReplicationException, IOException;
} }

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.replication;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
/**
* Manages and performs all replication admin operations.
* Used to add/remove a replication peer.
*/
@InterfaceAudience.Private
public class ReplicationManager {
  private final Configuration conf;
  private final ZooKeeperWatcher zkw;
  private final ReplicationQueuesClient replicationQueuesClient;
  private final ReplicationPeers replicationPeers;

  /**
   * Builds the ZooKeeper-backed queue client and peer tracker used by all admin operations.
   * @param conf cluster configuration
   * @param zkw watcher connected to the cluster's ZooKeeper ensemble
   * @param abortable abort target for unrecoverable ZK failures
   * @throws IOException if the trackers cannot be constructed or initialized
   */
  public ReplicationManager(Configuration conf, ZooKeeperWatcher zkw, Abortable abortable)
      throws IOException {
    this.conf = conf;
    this.zkw = zkw;
    try {
      this.replicationQueuesClient = ReplicationFactory
          .getReplicationQueuesClient(new ReplicationQueuesClientArguments(conf, abortable, zkw));
      this.replicationQueuesClient.init();
      this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
          this.replicationQueuesClient, abortable);
      this.replicationPeers.init();
    } catch (Exception e) {
      // Surface any tracker/ZK setup failure as IOException so the caller (HMaster init) fails fast.
      throw new IOException("Failed to construct ReplicationManager", e);
    }
  }

  /**
   * Registers a new replication peer after validating that its namespace and
   * table-cfs settings do not overlap.
   * @param peerId a short name that identifies the peer
   * @param peerConfig configuration for the replication slave cluster
   * @throws ReplicationException if the config is conflicting or registration fails
   */
  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
      throws ReplicationException {
    checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
      peerConfig.getTableCFsMap());
    this.replicationPeers.registerPeer(peerId, peerConfig);
  }

  /**
   * Unregisters the given peer, stopping replication to it.
   * @param peerId a short name that identifies the peer
   * @throws ReplicationException if unregistering fails
   */
  public void removeReplicationPeer(String peerId) throws ReplicationException {
    this.replicationPeers.unregisterPeer(peerId);
  }

  /**
   * Set a namespace in the peer config means that all tables in this namespace
   * will be replicated to the peer cluster.
   *
   * 1. If you already have set a namespace in the peer config, then you can't set any table
   *    of this namespace to the peer config.
   * 2. If you already have set a table in the peer config, then you can't set this table's
   *    namespace to the peer config.
   *
   * @param namespaces namespaces configured on the peer, may be null or empty
   * @param tableCfs table to column-family map configured on the peer, may be null or empty
   * @throws ReplicationException if a table's namespace is also listed in {@code namespaces}
   */
  private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces,
      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
    // A conflict is only possible when both settings are non-empty.
    if (namespaces == null || namespaces.isEmpty()) {
      return;
    }
    if (tableCfs == null || tableCfs.isEmpty()) {
      return;
    }
    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
      TableName table = entry.getKey();
      if (namespaces.contains(table.getNamespaceAsString())) {
        // Name the offending table and namespace so the operator can fix the peer config.
        throw new ReplicationException("Table-cfs " + table + " is conflict with namespace "
            + table.getNamespaceAsString() + " in peer config");
      }
    }
  }
}

View File

@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.User;
@ -2695,4 +2696,16 @@ public class AccessController extends BaseMasterAndRegionObserver
String groupName) throws IOException { String groupName) throws IOException {
requirePermission(getActiveUser(ctx), "balanceRSGroup", Action.ADMIN); requirePermission(getActiveUser(ctx), "balanceRSGroup", Action.ADMIN);
} }
@Override
public void preAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId, ReplicationPeerConfig peerConfig) throws IOException {
requirePermission(getActiveUser(ctx), "addReplicationPeer", Action.ADMIN);
}
@Override
public void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
requirePermission(getActiveUser(ctx), "removeReplicationPeer", Action.ADMIN);
}
} }

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeer; import org.apache.hadoop.hbase.replication.ReplicationPeer;
@ -76,8 +77,9 @@ public class TestReplicationAdmin {
*/ */
@BeforeClass @BeforeClass
public static void setUpBeforeClass() throws Exception { public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniZKCluster(); TEST_UTIL.startMiniCluster();
Configuration conf = TEST_UTIL.getConfiguration(); Configuration conf = TEST_UTIL.getConfiguration();
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
admin = new ReplicationAdmin(conf); admin = new ReplicationAdmin(conf);
} }
@ -86,7 +88,7 @@ public class TestReplicationAdmin {
if (admin != null) { if (admin != null) {
admin.close(); admin.close();
} }
TEST_UTIL.shutdownMiniZKCluster(); TEST_UTIL.shutdownMiniCluster();
} }
/** /**
@ -105,7 +107,7 @@ public class TestReplicationAdmin {
// try adding the same (fails) // try adding the same (fails)
try { try {
admin.addPeer(ID_ONE, rpc1, null); admin.addPeer(ID_ONE, rpc1, null);
} catch (IllegalArgumentException iae) { } catch (Exception e) {
// OK! // OK!
} }
assertEquals(1, admin.getPeersCount()); assertEquals(1, admin.getPeersCount());
@ -113,14 +115,14 @@ public class TestReplicationAdmin {
try { try {
admin.removePeer(ID_SECOND); admin.removePeer(ID_SECOND);
fail(); fail();
} catch (IllegalArgumentException iae) { } catch (Exception iae) {
// OK! // OK!
} }
assertEquals(1, admin.getPeersCount()); assertEquals(1, admin.getPeersCount());
// Add a second since multi-slave is supported // Add a second since multi-slave is supported
try { try {
admin.addPeer(ID_SECOND, rpc2, null); admin.addPeer(ID_SECOND, rpc2, null);
} catch (IllegalStateException iae) { } catch (Exception iae) {
fail(); fail();
} }
assertEquals(2, admin.getPeersCount()); assertEquals(2, admin.getPeersCount());
@ -170,7 +172,7 @@ public class TestReplicationAdmin {
try { try {
admin.addPeer(ID_ONE, rpc1, null); admin.addPeer(ID_ONE, rpc1, null);
fail(); fail();
} catch (ReplicationException e) { } catch (Exception e) {
// OK! // OK!
} }
repQueues.removeQueue(ID_ONE); repQueues.removeQueue(ID_ONE);
@ -181,7 +183,7 @@ public class TestReplicationAdmin {
try { try {
admin.addPeer(ID_ONE, rpc2, null); admin.addPeer(ID_ONE, rpc2, null);
fail(); fail();
} catch (ReplicationException e) { } catch (Exception e) {
// OK! // OK!
} }
repQueues.removeAllQueues(); repQueues.removeAllQueues();
@ -422,7 +424,7 @@ public class TestReplicationAdmin {
} }
@Test @Test
public void testNamespacesAndTableCfsConfigConflict() throws ReplicationException { public void testNamespacesAndTableCfsConfigConflict() throws Exception {
String ns1 = "ns1"; String ns1 = "ns1";
String ns2 = "ns2"; String ns2 = "ns2";
TableName tab1 = TableName.valueOf("ns1:tabl"); TableName tab1 = TableName.valueOf("ns1:tabl");
@ -471,7 +473,7 @@ public class TestReplicationAdmin {
} }
@Test @Test
public void testPeerBandwidth() throws ReplicationException { public void testPeerBandwidth() throws Exception {
ReplicationPeerConfig rpc = new ReplicationPeerConfig(); ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(KEY_ONE); rpc.setClusterKey(KEY_ONE);
admin.addPeer(ID_ONE, rpc); admin.addPeer(ID_ONE, rpc);

View File

@ -40,6 +40,8 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@ -380,4 +382,13 @@ public class MockNoopMasterServices implements MasterServices, Server {
public MasterProcedureManagerHost getMasterProcedureManagerHost() { public MasterProcedureManagerHost getMasterProcedureManagerHost() {
return null; return null;
} }
@Override
public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
throws ReplicationException {
}
@Override
public void removeReplicationPeer(String peerId) throws ReplicationException {
}
} }

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -275,8 +276,8 @@ public class TestMasterNoCluster {
void initClusterSchemaService() throws IOException, InterruptedException {} void initClusterSchemaService() throws IOException, InterruptedException {}
@Override @Override
void initializeZKBasedSystemTrackers() throws IOException, void initializeZKBasedSystemTrackers() throws IOException, InterruptedException,
InterruptedException, KeeperException, CoordinatedStateException { KeeperException, CoordinatedStateException {
super.initializeZKBasedSystemTrackers(); super.initializeZKBasedSystemTrackers();
// Record a newer server in server manager at first // Record a newer server in server manager at first
getServerManager().recordNewServerWithLock(newServer, ServerLoad.EMPTY_SERVERLOAD); getServerManager().recordNewServerWithLock(newServer, ServerLoad.EMPTY_SERVERLOAD);

View File

@ -123,18 +123,18 @@ public class TestReplicationBase {
utility2 = new HBaseTestingUtility(conf2); utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK); utility2.setZkCluster(miniZK);
zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true); zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
admin.addPeer("2", rpc, null);
LOG.info("Setup second Zk"); LOG.info("Setup second Zk");
CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1); CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
utility1.startMiniCluster(2); utility1.startMiniCluster(2);
// Have a bunch of slave servers, because inter-cluster shipping logic uses number of sinks // Have a bunch of slave servers, because inter-cluster shipping logic uses number of sinks
// as a component in deciding maximum number of parallel batches to send to the peer cluster. // as a component in deciding maximum number of parallel batches to send to the peer cluster.
utility2.startMiniCluster(4); utility2.startMiniCluster(4);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
admin.addPeer("2", rpc, null);
HTableDescriptor table = new HTableDescriptor(tableName); HTableDescriptor table = new HTableDescriptor(tableName);
HColumnDescriptor fam = new HColumnDescriptor(famName); HColumnDescriptor fam = new HColumnDescriptor(famName);
fam.setMaxVersions(100); fam.setMaxVersions(100);

View File

@ -130,14 +130,14 @@ public class TestReplicationWithTags {
utility2 = new HBaseTestingUtility(conf2); utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK); utility2.setZkCluster(miniZK);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
replicationAdmin.addPeer("2", rpc, null);
LOG.info("Setup second Zk"); LOG.info("Setup second Zk");
utility1.startMiniCluster(2); utility1.startMiniCluster(2);
utility2.startMiniCluster(2); utility2.startMiniCluster(2);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
replicationAdmin.addPeer("2", rpc, null);
HTableDescriptor table = new HTableDescriptor(TABLE_NAME); HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor fam = new HColumnDescriptor(FAMILY); HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
fam.setMaxVersions(3); fam.setMaxVersions(3);

View File

@ -106,14 +106,14 @@ public class TestSerialReplication {
utility2.setZkCluster(miniZK); utility2.setZkCluster(miniZK);
new ZooKeeperWatcher(conf2, "cluster2", null, true); new ZooKeeperWatcher(conf2, "cluster2", null, true);
utility1.startMiniCluster(1, 10);
utility2.startMiniCluster(1, 1);
ReplicationAdmin admin1 = new ReplicationAdmin(conf1); ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
ReplicationPeerConfig rpc = new ReplicationPeerConfig(); ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey()); rpc.setClusterKey(utility2.getClusterKey());
admin1.addPeer("1", rpc, null); admin1.addPeer("1", rpc, null);
utility1.startMiniCluster(1, 10);
utility2.startMiniCluster(1, 1);
utility1.getHBaseAdmin().setBalancerRunning(false, true); utility1.getHBaseAdmin().setBalancerRunning(false, true);
} }

View File

@ -2870,4 +2870,34 @@ public class TestAccessController extends SecureTestUtil {
verifyAllowed(action1, SUPERUSER, USER_ADMIN); verifyAllowed(action1, SUPERUSER, USER_ADMIN);
verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
} }
@Test
public void testAddReplicationPeer() throws Exception {
AccessTestAction action = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preAddReplicationPeer(ObserverContext.createAndPrepare(CP_ENV, null),
"test", null);
return null;
}
};
verifyAllowed(action, SUPERUSER, USER_ADMIN);
verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
@Test
public void testRemoveReplicationPeer() throws Exception {
AccessTestAction action = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preRemoveReplicationPeer(ObserverContext.createAndPrepare(CP_ENV, null),
"test");
return null;
}
};
verifyAllowed(action, SUPERUSER, USER_ADMIN);
verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
} }

View File

@ -128,14 +128,16 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
TEST_UTIL1 = new HBaseTestingUtility(conf1); TEST_UTIL1 = new HBaseTestingUtility(conf1);
TEST_UTIL1.setZkCluster(miniZK); TEST_UTIL1.setZkCluster(miniZK);
zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true); zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(TEST_UTIL1.getClusterKey());
replicationAdmin.addPeer("2", rpc, null);
TEST_UTIL.startMiniCluster(1); TEST_UTIL.startMiniCluster(1);
// Wait for the labels table to become available // Wait for the labels table to become available
TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
TEST_UTIL1.startMiniCluster(1); TEST_UTIL1.startMiniCluster(1);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(TEST_UTIL1.getClusterKey());
replicationAdmin.addPeer("2", rpc, null);
HTableDescriptor table = new HTableDescriptor(TABLE_NAME); HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor desc = new HColumnDescriptor(fam); HColumnDescriptor desc = new HColumnDescriptor(fam);
desc.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); desc.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);

View File

@ -177,14 +177,16 @@ public class TestVisibilityLabelsReplication {
TEST_UTIL1 = new HBaseTestingUtility(conf1); TEST_UTIL1 = new HBaseTestingUtility(conf1);
TEST_UTIL1.setZkCluster(miniZK); TEST_UTIL1.setZkCluster(miniZK);
zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true); zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(TEST_UTIL1.getClusterKey());
replicationAdmin.addPeer("2", rpc, null);
TEST_UTIL.startMiniCluster(1); TEST_UTIL.startMiniCluster(1);
// Wait for the labels table to become available // Wait for the labels table to become available
TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
TEST_UTIL1.startMiniCluster(1); TEST_UTIL1.startMiniCluster(1);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(TEST_UTIL1.getClusterKey());
replicationAdmin.addPeer("2", rpc, null);
Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin(); Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor table = new HTableDescriptor(TABLE_NAME); HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor desc = new HColumnDescriptor(fam); HColumnDescriptor desc = new HColumnDescriptor(fam);

View File

@ -116,6 +116,8 @@ In case the table goes out of date, the unit tests which check for accuracy of p
| | setUserQuota(Table level) | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) | | setUserQuota(Table level) | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
| | setTableQuota | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) | | setTableQuota | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
| | setNamespaceQuota | superuser\|global(A) | | setNamespaceQuota | superuser\|global(A)
| | addReplicationPeer | superuser\|global(A)
| | removeReplicationPeer | superuser\|global(A)
| Region | openRegion | superuser\|global(A) | Region | openRegion | superuser\|global(A)
| | closeRegion | superuser\|global(A) | | closeRegion | superuser\|global(A)
| | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C) | | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)