HBASE-17600 Implement get/create/modify/delete/list namespace admin operations
This commit is contained in:
parent
9214ad69af
commit
4ff838df13
|
@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||||
import org.apache.hadoop.hbase.HRegionInfo;
|
import org.apache.hadoop.hbase.HRegionInfo;
|
||||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||||
import org.apache.hadoop.hbase.ServerName;
|
import org.apache.hadoop.hbase.ServerName;
|
||||||
|
import org.apache.hadoop.hbase.NamespaceDescriptor;
|
||||||
import org.apache.hadoop.hbase.TableName;
|
import org.apache.hadoop.hbase.TableName;
|
||||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.hbase.classification.InterfaceStability;
|
import org.apache.hadoop.hbase.classification.InterfaceStability;
|
||||||
|
@ -253,6 +254,37 @@ public interface AsyncAdmin {
|
||||||
CompletableFuture<Void> modifyColumnFamily(final TableName tableName,
|
CompletableFuture<Void> modifyColumnFamily(final TableName tableName,
|
||||||
final HColumnDescriptor columnFamily);
|
final HColumnDescriptor columnFamily);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a new namespace.
|
||||||
|
* @param descriptor descriptor which describes the new namespace
|
||||||
|
*/
|
||||||
|
CompletableFuture<Void> createNamespace(final NamespaceDescriptor descriptor);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Modify an existing namespace.
|
||||||
|
* @param descriptor descriptor which describes the new namespace
|
||||||
|
*/
|
||||||
|
CompletableFuture<Void> modifyNamespace(final NamespaceDescriptor descriptor);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete an existing namespace. Only empty namespaces (no tables) can be removed.
|
||||||
|
* @param name namespace name
|
||||||
|
*/
|
||||||
|
CompletableFuture<Void> deleteNamespace(final String name);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a namespace descriptor by name
|
||||||
|
* @param name name of namespace descriptor
|
||||||
|
* @return A descriptor wrapped by a {@link CompletableFuture}.
|
||||||
|
*/
|
||||||
|
CompletableFuture<NamespaceDescriptor> getNamespaceDescriptor(final String name);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* List available namespace descriptors
|
||||||
|
* @return List of descriptors wrapped by a {@link CompletableFuture}.
|
||||||
|
*/
|
||||||
|
CompletableFuture<NamespaceDescriptor[]> listNamespaceDescriptors();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Turn the load balancer on or off.
|
* Turn the load balancer on or off.
|
||||||
* @param on
|
* @param on
|
||||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
|
||||||
import org.apache.hadoop.hbase.NotServingRegionException;
|
import org.apache.hadoop.hbase.NotServingRegionException;
|
||||||
import org.apache.hadoop.hbase.RegionLocations;
|
import org.apache.hadoop.hbase.RegionLocations;
|
||||||
import org.apache.hadoop.hbase.ServerName;
|
import org.apache.hadoop.hbase.ServerName;
|
||||||
|
import org.apache.hadoop.hbase.NamespaceDescriptor;
|
||||||
import org.apache.hadoop.hbase.TableName;
|
import org.apache.hadoop.hbase.TableName;
|
||||||
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
|
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
|
||||||
import org.apache.hadoop.hbase.HConstants;
|
import org.apache.hadoop.hbase.HConstants;
|
||||||
|
@ -60,12 +61,18 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnR
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
|
||||||
|
@ -80,9 +87,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTabl
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
|
||||||
|
@ -208,9 +219,9 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
|
||||||
|
|
||||||
private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq,
|
private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq,
|
||||||
MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter,
|
MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter,
|
||||||
TableProcedureBiConsumer consumer) {
|
ProcedureBiConsumer consumer) {
|
||||||
CompletableFuture<Long> procFuture = this
|
CompletableFuture<Long> procFuture = this
|
||||||
.<Long>newMasterCaller()
|
.<Long> newMasterCaller()
|
||||||
.action(
|
.action(
|
||||||
(controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall,
|
(controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall,
|
||||||
respConverter)).call();
|
respConverter)).call();
|
||||||
|
@ -467,6 +478,54 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
|
||||||
(resp) -> resp.getProcId(), new ModifyColumnFamilyProcedureBiConsumer(this, tableName));
|
(resp) -> resp.getProcId(), new ModifyColumnFamilyProcedureBiConsumer(this, tableName));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CompletableFuture<Void> createNamespace(NamespaceDescriptor descriptor) {
|
||||||
|
return this.<CreateNamespaceRequest, CreateNamespaceResponse> procedureCall(
|
||||||
|
RequestConverter.buildCreateNamespaceRequest(descriptor),
|
||||||
|
(s, c, req, done) -> s.createNamespace(c, req, done), (resp) -> resp.getProcId(),
|
||||||
|
new CreateNamespaceProcedureBiConsumer(this, descriptor.getName()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CompletableFuture<Void> modifyNamespace(NamespaceDescriptor descriptor) {
|
||||||
|
return this.<ModifyNamespaceRequest, ModifyNamespaceResponse> procedureCall(
|
||||||
|
RequestConverter.buildModifyNamespaceRequest(descriptor),
|
||||||
|
(s, c, req, done) -> s.modifyNamespace(c, req, done), (resp) -> resp.getProcId(),
|
||||||
|
new ModifyNamespaceProcedureBiConsumer(this, descriptor.getName()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CompletableFuture<Void> deleteNamespace(String name) {
|
||||||
|
return this.<DeleteNamespaceRequest, DeleteNamespaceResponse> procedureCall(
|
||||||
|
RequestConverter.buildDeleteNamespaceRequest(name),
|
||||||
|
(s, c, req, done) -> s.deleteNamespace(c, req, done), (resp) -> resp.getProcId(),
|
||||||
|
new ModifyNamespaceProcedureBiConsumer(this, name));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CompletableFuture<NamespaceDescriptor> getNamespaceDescriptor(String name) {
|
||||||
|
return this
|
||||||
|
.<NamespaceDescriptor> newMasterCaller()
|
||||||
|
.action(
|
||||||
|
(controller, stub) -> this
|
||||||
|
.<GetNamespaceDescriptorRequest, GetNamespaceDescriptorResponse, NamespaceDescriptor> call(
|
||||||
|
controller, stub, RequestConverter.buildGetNamespaceDescriptorRequest(name), (s, c,
|
||||||
|
req, done) -> s.getNamespaceDescriptor(c, req, done), (resp) -> ProtobufUtil
|
||||||
|
.toNamespaceDescriptor(resp.getNamespaceDescriptor()))).call();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CompletableFuture<NamespaceDescriptor[]> listNamespaceDescriptors() {
|
||||||
|
return this
|
||||||
|
.<NamespaceDescriptor[]> newMasterCaller()
|
||||||
|
.action(
|
||||||
|
(controller, stub) -> this
|
||||||
|
.<ListNamespaceDescriptorsRequest, ListNamespaceDescriptorsResponse, NamespaceDescriptor[]> call(
|
||||||
|
controller, stub, ListNamespaceDescriptorsRequest.newBuilder().build(), (s, c, req,
|
||||||
|
done) -> s.listNamespaceDescriptors(c, req, done), (resp) -> ProtobufUtil
|
||||||
|
.getNamespaceDescriptorArray(resp))).call();
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public CompletableFuture<Boolean> setBalancerRunning(final boolean on) {
|
public CompletableFuture<Boolean> setBalancerRunning(final boolean on) {
|
||||||
return this
|
return this
|
||||||
|
@ -674,6 +733,31 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private abstract class NamespaceProcedureBiConsumer extends ProcedureBiConsumer {
|
||||||
|
protected final String namespaceName;
|
||||||
|
|
||||||
|
NamespaceProcedureBiConsumer(final AsyncAdmin admin, final String namespaceName) {
|
||||||
|
super(admin);
|
||||||
|
this.namespaceName = namespaceName;
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract String getOperationType();
|
||||||
|
|
||||||
|
String getDescription() {
|
||||||
|
return "Operation: " + getOperationType() + ", Namespace: " + namespaceName;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
void onFinished() {
|
||||||
|
LOG.info(getDescription() + " completed");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
void onError(Throwable error) {
|
||||||
|
LOG.info(getDescription() + " failed with " + error.getMessage());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private class CreateTableProcedureBiConsumer extends TableProcedureBiConsumer {
|
private class CreateTableProcedureBiConsumer extends TableProcedureBiConsumer {
|
||||||
|
|
||||||
CreateTableProcedureBiConsumer(AsyncAdmin admin, TableName tableName) {
|
CreateTableProcedureBiConsumer(AsyncAdmin admin, TableName tableName) {
|
||||||
|
@ -768,6 +852,39 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private class CreateNamespaceProcedureBiConsumer extends NamespaceProcedureBiConsumer {
|
||||||
|
|
||||||
|
CreateNamespaceProcedureBiConsumer(AsyncAdmin admin, String namespaceName) {
|
||||||
|
super(admin, namespaceName);
|
||||||
|
}
|
||||||
|
|
||||||
|
String getOperationType() {
|
||||||
|
return "CREATE_NAMESPACE";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private class DeleteNamespaceProcedureBiConsumer extends NamespaceProcedureBiConsumer {
|
||||||
|
|
||||||
|
DeleteNamespaceProcedureBiConsumer(AsyncAdmin admin, String namespaceName) {
|
||||||
|
super(admin, namespaceName);
|
||||||
|
}
|
||||||
|
|
||||||
|
String getOperationType() {
|
||||||
|
return "DELETE_NAMESPACE";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private class ModifyNamespaceProcedureBiConsumer extends NamespaceProcedureBiConsumer {
|
||||||
|
|
||||||
|
ModifyNamespaceProcedureBiConsumer(AsyncAdmin admin, String namespaceName) {
|
||||||
|
super(admin, namespaceName);
|
||||||
|
}
|
||||||
|
|
||||||
|
String getOperationType() {
|
||||||
|
return "MODIFY_NAMESPACE";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private CompletableFuture<Void> waitProcedureResult(CompletableFuture<Long> procFuture) {
|
private CompletableFuture<Void> waitProcedureResult(CompletableFuture<Long> procFuture) {
|
||||||
CompletableFuture<Void> future = new CompletableFuture<>();
|
CompletableFuture<Void> future = new CompletableFuture<>();
|
||||||
procFuture.whenComplete((procId, error) -> {
|
procFuture.whenComplete((procId, error) -> {
|
||||||
|
|
|
@ -114,6 +114,10 @@ public abstract class AsyncRpcRetryingCaller<T> {
|
||||||
protected void onError(Throwable error, Supplier<String> errMsg,
|
protected void onError(Throwable error, Supplier<String> errMsg,
|
||||||
Consumer<Throwable> updateCachedLocation) {
|
Consumer<Throwable> updateCachedLocation) {
|
||||||
error = translateException(error);
|
error = translateException(error);
|
||||||
|
if (error instanceof DoNotRetryIOException) {
|
||||||
|
future.completeExceptionally(error);
|
||||||
|
return;
|
||||||
|
}
|
||||||
if (tries > startLogErrorsCnt) {
|
if (tries > startLogErrorsCnt) {
|
||||||
LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts
|
LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts
|
||||||
+ ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs)
|
+ ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs)
|
||||||
|
@ -122,7 +126,7 @@ public abstract class AsyncRpcRetryingCaller<T> {
|
||||||
RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(
|
RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(
|
||||||
error, EnvironmentEdgeManager.currentTime(), "");
|
error, EnvironmentEdgeManager.currentTime(), "");
|
||||||
exceptions.add(qt);
|
exceptions.add(qt);
|
||||||
if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
|
if (tries >= maxAttempts) {
|
||||||
completeExceptionally();
|
completeExceptionally();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
|
@ -151,6 +151,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
|
||||||
|
@ -393,6 +395,21 @@ public final class ProtobufUtil {
|
||||||
return ServerName.valueOf(hostName, port, startCode);
|
return ServerName.valueOf(hostName, port, startCode);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get NamespaceDescriptor[] from ListNamespaceDescriptorsResponse protobuf
|
||||||
|
* @param proto the ListNamespaceDescriptorsResponse
|
||||||
|
* @return NamespaceDescriptor[]
|
||||||
|
*/
|
||||||
|
public static NamespaceDescriptor[] getNamespaceDescriptorArray(
|
||||||
|
ListNamespaceDescriptorsResponse proto) {
|
||||||
|
List<HBaseProtos.NamespaceDescriptor> list = proto.getNamespaceDescriptorList();
|
||||||
|
NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
|
||||||
|
for (int i = 0; i < list.size(); i++) {
|
||||||
|
res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf
|
* Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf
|
||||||
*
|
*
|
||||||
|
@ -2136,11 +2153,9 @@ public final class ProtobufUtil {
|
||||||
return b.build();
|
return b.build();
|
||||||
}
|
}
|
||||||
|
|
||||||
public static NamespaceDescriptor toNamespaceDescriptor(
|
public static NamespaceDescriptor toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor desc) {
|
||||||
HBaseProtos.NamespaceDescriptor desc) throws IOException {
|
NamespaceDescriptor.Builder b = NamespaceDescriptor.create(desc.getName().toStringUtf8());
|
||||||
NamespaceDescriptor.Builder b =
|
for (HBaseProtos.NameStringPair prop : desc.getConfigurationList()) {
|
||||||
NamespaceDescriptor.create(desc.getName().toStringUtf8());
|
|
||||||
for(HBaseProtos.NameStringPair prop : desc.getConfigurationList()) {
|
|
||||||
b.addConfiguration(prop.getName(), prop.getValue());
|
b.addConfiguration(prop.getName(), prop.getValue());
|
||||||
}
|
}
|
||||||
return b.build();
|
return b.build();
|
||||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||||
import org.apache.hadoop.hbase.HConstants;
|
import org.apache.hadoop.hbase.HConstants;
|
||||||
import org.apache.hadoop.hbase.HRegionInfo;
|
import org.apache.hadoop.hbase.HRegionInfo;
|
||||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||||
|
import org.apache.hadoop.hbase.NamespaceDescriptor;
|
||||||
import org.apache.hadoop.hbase.ServerName;
|
import org.apache.hadoop.hbase.ServerName;
|
||||||
import org.apache.hadoop.hbase.TableName;
|
import org.apache.hadoop.hbase.TableName;
|
||||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||||
|
@ -80,11 +81,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
|
||||||
|
@ -1644,4 +1649,50 @@ public final class RequestConverter {
|
||||||
}
|
}
|
||||||
return builder.build();
|
return builder.build();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a protocol buffer CreateNamespaceRequest
|
||||||
|
* @param descriptor
|
||||||
|
* @return a CreateNamespaceRequest
|
||||||
|
*/
|
||||||
|
public static CreateNamespaceRequest buildCreateNamespaceRequest(
|
||||||
|
final NamespaceDescriptor descriptor) {
|
||||||
|
CreateNamespaceRequest.Builder builder = CreateNamespaceRequest.newBuilder();
|
||||||
|
builder.setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor));
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a protocol buffer ModifyNamespaceRequest
|
||||||
|
* @param descriptor
|
||||||
|
* @return a ModifyNamespaceRequest
|
||||||
|
*/
|
||||||
|
public static ModifyNamespaceRequest buildModifyNamespaceRequest(
|
||||||
|
final NamespaceDescriptor descriptor) {
|
||||||
|
ModifyNamespaceRequest.Builder builder = ModifyNamespaceRequest.newBuilder();
|
||||||
|
builder.setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor));
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a protocol buffer DeleteNamespaceRequest
|
||||||
|
* @param name
|
||||||
|
* @return a DeleteNamespaceRequest
|
||||||
|
*/
|
||||||
|
public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String name) {
|
||||||
|
DeleteNamespaceRequest.Builder builder = DeleteNamespaceRequest.newBuilder();
|
||||||
|
builder.setNamespaceName(name);
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a protocol buffer GetNamespaceDescriptorRequest
|
||||||
|
* @param name
|
||||||
|
* @return a GetNamespaceDescriptorRequest
|
||||||
|
*/
|
||||||
|
public static GetNamespaceDescriptorRequest buildGetNamespaceDescriptorRequest(final String name) {
|
||||||
|
GetNamespaceDescriptorRequest.Builder builder = GetNamespaceDescriptorRequest.newBuilder();
|
||||||
|
builder.setNamespaceName(name);
|
||||||
|
return builder.build();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,67 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hbase.client;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY;
|
||||||
|
|
||||||
|
import org.apache.commons.io.IOUtils;
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||||
|
import org.apache.hadoop.hbase.HConstants;
|
||||||
|
import org.apache.hadoop.hbase.util.Bytes;
|
||||||
|
import org.junit.AfterClass;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Class to test AsyncAdmin.
|
||||||
|
*/
|
||||||
|
public abstract class TestAsyncAdminBase {
|
||||||
|
|
||||||
|
protected static final Log LOG = LogFactory.getLog(TestAdmin1.class);
|
||||||
|
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
|
||||||
|
protected static byte[] FAMILY = Bytes.toBytes("testFamily");
|
||||||
|
protected static final byte[] FAMILY_0 = Bytes.toBytes("cf0");
|
||||||
|
protected static final byte[] FAMILY_1 = Bytes.toBytes("cf1");
|
||||||
|
|
||||||
|
protected static AsyncConnection ASYNC_CONN;
|
||||||
|
protected AsyncAdmin admin;
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUpBeforeClass() throws Exception {
|
||||||
|
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_PAUSE, 10);
|
||||||
|
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
|
||||||
|
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1000);
|
||||||
|
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 3000);
|
||||||
|
TEST_UTIL.getConfiguration().setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
|
||||||
|
TEST_UTIL.startMiniCluster(1);
|
||||||
|
ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
|
||||||
|
}
|
||||||
|
|
||||||
|
@AfterClass
|
||||||
|
public static void tearDownAfterClass() throws Exception {
|
||||||
|
IOUtils.closeQuietly(ASYNC_CONN);
|
||||||
|
TEST_UTIL.shutdownMiniCluster();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setUp() throws Exception {
|
||||||
|
this.admin = ASYNC_CONN.getAdmin();
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,51 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hbase.client;
|
||||||
|
|
||||||
|
import static org.junit.Assert.assertEquals;
|
||||||
|
|
||||||
|
import org.apache.hadoop.hbase.testclassification.ClientTests;
|
||||||
|
import org.apache.hadoop.hbase.testclassification.MediumTests;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.junit.experimental.categories.Category;
|
||||||
|
|
||||||
|
@Category({ MediumTests.class, ClientTests.class })
|
||||||
|
public class TestAsyncBalancerAdminApi extends TestAsyncAdminBase {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testBalancer() throws Exception {
|
||||||
|
boolean initialState = admin.isBalancerEnabled().get();
|
||||||
|
|
||||||
|
// Start the balancer, wait for it.
|
||||||
|
boolean prevState = admin.setBalancerRunning(!initialState).get();
|
||||||
|
|
||||||
|
// The previous state should be the original state we observed
|
||||||
|
assertEquals(initialState, prevState);
|
||||||
|
|
||||||
|
// Current state should be opposite of the original
|
||||||
|
assertEquals(!initialState, admin.isBalancerEnabled().get());
|
||||||
|
|
||||||
|
// Reset it back to what it was
|
||||||
|
prevState = admin.setBalancerRunning(initialState).get();
|
||||||
|
|
||||||
|
// The previous state should be the opposite of the initial state
|
||||||
|
assertEquals(!initialState, prevState);
|
||||||
|
// Current state should be the original state again
|
||||||
|
assertEquals(initialState, admin.isBalancerEnabled().get());
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,189 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hbase.client;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY;
|
||||||
|
import static org.junit.Assert.assertEquals;
|
||||||
|
import static org.junit.Assert.assertNotNull;
|
||||||
|
import static org.junit.Assert.assertNull;
|
||||||
|
import static org.junit.Assert.assertTrue;
|
||||||
|
import static org.junit.Assert.fail;
|
||||||
|
|
||||||
|
import java.util.concurrent.Callable;
|
||||||
|
|
||||||
|
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||||
|
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||||
|
import org.apache.hadoop.hbase.MiniHBaseCluster;
|
||||||
|
import org.apache.hadoop.hbase.NamespaceDescriptor;
|
||||||
|
import org.apache.hadoop.hbase.NamespaceExistException;
|
||||||
|
import org.apache.hadoop.hbase.NamespaceNotFoundException;
|
||||||
|
import org.apache.hadoop.hbase.TableName;
|
||||||
|
import org.apache.hadoop.hbase.Waiter;
|
||||||
|
import org.apache.hadoop.hbase.ZKNamespaceManager;
|
||||||
|
import org.apache.hadoop.hbase.master.HMaster;
|
||||||
|
import org.apache.hadoop.hbase.testclassification.ClientTests;
|
||||||
|
import org.apache.hadoop.hbase.testclassification.MediumTests;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.junit.experimental.categories.Category;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Class to test asynchronous namespace admin operations.
|
||||||
|
*/
|
||||||
|
@Category({ MediumTests.class, ClientTests.class })
|
||||||
|
public class TestAsyncNamespaceAdminApi extends TestAsyncAdminBase {
|
||||||
|
|
||||||
|
private String prefix = "TestNamespace";
|
||||||
|
private static HMaster master;
|
||||||
|
private static ZKNamespaceManager zkNamespaceManager;
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void setUpBeforeClass() throws Exception {
|
||||||
|
TEST_UTIL.getConfiguration().setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
|
||||||
|
TEST_UTIL.startMiniCluster(1);
|
||||||
|
ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
|
||||||
|
master = ((MiniHBaseCluster) TEST_UTIL.getHBaseCluster()).getMaster();
|
||||||
|
zkNamespaceManager = new ZKNamespaceManager(master.getZooKeeper());
|
||||||
|
zkNamespaceManager.start();
|
||||||
|
LOG.info("Done initializing cluster");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(timeout = 60000)
|
||||||
|
public void testCreateAndDelete() throws Exception {
|
||||||
|
String testName = "testCreateAndDelete";
|
||||||
|
String nsName = prefix + "_" + testName;
|
||||||
|
|
||||||
|
// create namespace and verify
|
||||||
|
admin.createNamespace(NamespaceDescriptor.create(nsName).build()).join();
|
||||||
|
assertEquals(3, admin.listNamespaceDescriptors().get().length);
|
||||||
|
TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
|
||||||
|
@Override
|
||||||
|
public boolean evaluate() throws Exception {
|
||||||
|
return zkNamespaceManager.list().size() == 3;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
assertNotNull(zkNamespaceManager.get(nsName));
|
||||||
|
// delete namespace and verify
|
||||||
|
admin.deleteNamespace(nsName).join();
|
||||||
|
assertEquals(2, admin.listNamespaceDescriptors().get().length);
|
||||||
|
assertEquals(2, zkNamespaceManager.list().size());
|
||||||
|
assertNull(zkNamespaceManager.get(nsName));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(timeout = 60000)
|
||||||
|
public void testDeleteReservedNS() throws Exception {
|
||||||
|
boolean exceptionCaught = false;
|
||||||
|
try {
|
||||||
|
admin.deleteNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR).join();
|
||||||
|
} catch (Exception exp) {
|
||||||
|
LOG.warn(exp);
|
||||||
|
exceptionCaught = true;
|
||||||
|
} finally {
|
||||||
|
assertTrue(exceptionCaught);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
admin.deleteNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR).join();
|
||||||
|
} catch (Exception exp) {
|
||||||
|
LOG.warn(exp);
|
||||||
|
exceptionCaught = true;
|
||||||
|
} finally {
|
||||||
|
assertTrue(exceptionCaught);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(timeout = 60000)
|
||||||
|
public void testNamespaceOperations() throws Exception {
|
||||||
|
admin.createNamespace(NamespaceDescriptor.create(prefix + "ns1").build()).join();
|
||||||
|
admin.createNamespace(NamespaceDescriptor.create(prefix + "ns2").build()).join();
|
||||||
|
|
||||||
|
// create namespace that already exists
|
||||||
|
runWithExpectedException(new Callable<Void>() {
|
||||||
|
@Override
|
||||||
|
public Void call() throws Exception {
|
||||||
|
admin.createNamespace(NamespaceDescriptor.create(prefix + "ns1").build()).join();
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}, NamespaceExistException.class);
|
||||||
|
|
||||||
|
// create a table in non-existing namespace
|
||||||
|
runWithExpectedException(new Callable<Void>() {
|
||||||
|
@Override
|
||||||
|
public Void call() throws Exception {
|
||||||
|
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("non_existing_namespace",
|
||||||
|
"table1"));
|
||||||
|
htd.addFamily(new HColumnDescriptor("family1"));
|
||||||
|
admin.createTable(htd).join();
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}, NamespaceNotFoundException.class);
|
||||||
|
|
||||||
|
// get descriptor for existing namespace
|
||||||
|
NamespaceDescriptor ns1 = admin.getNamespaceDescriptor(prefix + "ns1").get();
|
||||||
|
assertEquals(prefix + "ns1", ns1.getName());
|
||||||
|
|
||||||
|
// get descriptor for non-existing namespace
|
||||||
|
runWithExpectedException(new Callable<NamespaceDescriptor>() {
|
||||||
|
@Override
|
||||||
|
public NamespaceDescriptor call() throws Exception {
|
||||||
|
return admin.getNamespaceDescriptor("non_existing_namespace").get();
|
||||||
|
}
|
||||||
|
}, NamespaceNotFoundException.class);
|
||||||
|
|
||||||
|
// delete descriptor for existing namespace
|
||||||
|
admin.deleteNamespace(prefix + "ns2").join();
|
||||||
|
|
||||||
|
// delete descriptor for non-existing namespace
|
||||||
|
runWithExpectedException(new Callable<Void>() {
|
||||||
|
@Override
|
||||||
|
public Void call() throws Exception {
|
||||||
|
admin.deleteNamespace("non_existing_namespace").join();
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}, NamespaceNotFoundException.class);
|
||||||
|
|
||||||
|
// modify namespace descriptor for existing namespace
|
||||||
|
ns1 = admin.getNamespaceDescriptor(prefix + "ns1").get();
|
||||||
|
ns1.setConfiguration("foo", "bar");
|
||||||
|
admin.modifyNamespace(ns1).join();
|
||||||
|
ns1 = admin.getNamespaceDescriptor(prefix + "ns1").get();
|
||||||
|
assertEquals("bar", ns1.getConfigurationValue("foo"));
|
||||||
|
|
||||||
|
// modify namespace descriptor for non-existing namespace
|
||||||
|
runWithExpectedException(new Callable<Void>() {
|
||||||
|
@Override
|
||||||
|
public Void call() throws Exception {
|
||||||
|
admin.modifyNamespace(NamespaceDescriptor.create("non_existing_namespace").build()).join();
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}, NamespaceNotFoundException.class);
|
||||||
|
|
||||||
|
admin.deleteNamespace(prefix + "ns1").join();
|
||||||
|
}
|
||||||
|
|
||||||
|
private static <V, E> void runWithExpectedException(Callable<V> callable, Class<E> exceptionClass) {
|
||||||
|
try {
|
||||||
|
callable.call();
|
||||||
|
} catch (Exception ex) {
|
||||||
|
LOG.info("Get exception is " + ex);
|
||||||
|
assertEquals(exceptionClass, ex.getCause().getClass());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
fail("Should have thrown exception " + exceptionClass);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,203 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hbase.client;
|
||||||
|
|
||||||
|
import static org.junit.Assert.assertFalse;
|
||||||
|
import static org.junit.Assert.assertTrue;
|
||||||
|
import static org.junit.Assert.fail;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||||
|
import org.apache.hadoop.hbase.HConstants;
|
||||||
|
import org.apache.hadoop.hbase.HRegionInfo;
|
||||||
|
import org.apache.hadoop.hbase.HRegionLocation;
|
||||||
|
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||||
|
import org.apache.hadoop.hbase.ServerName;
|
||||||
|
import org.apache.hadoop.hbase.TableName;
|
||||||
|
import org.apache.hadoop.hbase.regionserver.HRegionServer;
|
||||||
|
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
|
||||||
|
import org.apache.hadoop.hbase.testclassification.ClientTests;
|
||||||
|
import org.apache.hadoop.hbase.testclassification.MediumTests;
|
||||||
|
import org.apache.hadoop.hbase.util.Bytes;
|
||||||
|
import org.apache.hadoop.hbase.util.Pair;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.junit.experimental.categories.Category;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Class to test asynchronous region admin operations.
|
||||||
|
*/
|
||||||
|
@Category({ MediumTests.class, ClientTests.class })
|
||||||
|
public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
|
||||||
|
|
||||||
|
private void createTableWithDefaultConf(TableName TABLENAME) throws Exception {
|
||||||
|
HTableDescriptor htd = new HTableDescriptor(TABLENAME);
|
||||||
|
HColumnDescriptor hcd = new HColumnDescriptor("value");
|
||||||
|
htd.addFamily(hcd);
|
||||||
|
|
||||||
|
admin.createTable(htd, null).get();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCloseRegion() throws Exception {
|
||||||
|
TableName TABLENAME = TableName.valueOf("TestHBACloseRegion");
|
||||||
|
createTableWithDefaultConf(TABLENAME);
|
||||||
|
|
||||||
|
HRegionInfo info = null;
|
||||||
|
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME);
|
||||||
|
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
||||||
|
for (HRegionInfo regionInfo : onlineRegions) {
|
||||||
|
if (!regionInfo.getTable().isSystemTable()) {
|
||||||
|
info = regionInfo;
|
||||||
|
boolean closed = admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(),
|
||||||
|
rs.getServerName().getServerName()).get();
|
||||||
|
assertTrue(closed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
boolean isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
|
||||||
|
long timeout = System.currentTimeMillis() + 10000;
|
||||||
|
while ((System.currentTimeMillis() < timeout) && (isInList)) {
|
||||||
|
Thread.sleep(100);
|
||||||
|
isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
|
||||||
|
}
|
||||||
|
|
||||||
|
assertFalse("The region should not be present in online regions list.", isInList);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception {
|
||||||
|
final String name = "TestHBACloseRegion1";
|
||||||
|
byte[] TABLENAME = Bytes.toBytes(name);
|
||||||
|
createTableWithDefaultConf(TableName.valueOf(TABLENAME));
|
||||||
|
|
||||||
|
HRegionInfo info = null;
|
||||||
|
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));
|
||||||
|
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
||||||
|
for (HRegionInfo regionInfo : onlineRegions) {
|
||||||
|
if (!regionInfo.isMetaTable()) {
|
||||||
|
if (regionInfo.getRegionNameAsString().contains(name)) {
|
||||||
|
info = regionInfo;
|
||||||
|
boolean catchNotServingException = false;
|
||||||
|
try {
|
||||||
|
admin.closeRegionWithEncodedRegionName("sample", rs.getServerName().getServerName())
|
||||||
|
.get();
|
||||||
|
} catch (Exception e) {
|
||||||
|
catchNotServingException = true;
|
||||||
|
// expected, ignore it
|
||||||
|
}
|
||||||
|
assertTrue(catchNotServingException);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
||||||
|
assertTrue("The region should be present in online regions list.",
|
||||||
|
onlineRegions.contains(info));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCloseRegionWhenServerNameIsNull() throws Exception {
|
||||||
|
byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion3");
|
||||||
|
createTableWithDefaultConf(TableName.valueOf(TABLENAME));
|
||||||
|
|
||||||
|
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));
|
||||||
|
|
||||||
|
try {
|
||||||
|
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
||||||
|
for (HRegionInfo regionInfo : onlineRegions) {
|
||||||
|
if (!regionInfo.isMetaTable()) {
|
||||||
|
if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion3")) {
|
||||||
|
admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), null).get();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fail("The test should throw exception if the servername passed is null.");
|
||||||
|
} catch (IllegalArgumentException e) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCloseRegionWhenServerNameIsEmpty() throws Exception {
|
||||||
|
byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegionWhenServerNameIsEmpty");
|
||||||
|
createTableWithDefaultConf(TableName.valueOf(TABLENAME));
|
||||||
|
|
||||||
|
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));
|
||||||
|
|
||||||
|
try {
|
||||||
|
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
||||||
|
for (HRegionInfo regionInfo : onlineRegions) {
|
||||||
|
if (!regionInfo.isMetaTable()) {
|
||||||
|
if (regionInfo.getRegionNameAsString()
|
||||||
|
.contains("TestHBACloseRegionWhenServerNameIsEmpty")) {
|
||||||
|
admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), " ").get();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fail("The test should throw exception if the servername passed is empty.");
|
||||||
|
} catch (IllegalArgumentException e) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCloseRegionWhenEncodedRegionNameIsNotGiven() throws Exception {
|
||||||
|
byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion4");
|
||||||
|
createTableWithDefaultConf(TableName.valueOf(TABLENAME));
|
||||||
|
|
||||||
|
HRegionInfo info = null;
|
||||||
|
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));
|
||||||
|
|
||||||
|
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
||||||
|
for (HRegionInfo regionInfo : onlineRegions) {
|
||||||
|
if (!regionInfo.isMetaTable()) {
|
||||||
|
if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion4")) {
|
||||||
|
info = regionInfo;
|
||||||
|
boolean catchNotServingException = false;
|
||||||
|
try {
|
||||||
|
admin.closeRegionWithEncodedRegionName(regionInfo.getRegionNameAsString(),
|
||||||
|
rs.getServerName().getServerName()).get();
|
||||||
|
} catch (Exception e) {
|
||||||
|
// expected, ignore it.
|
||||||
|
catchNotServingException = true;
|
||||||
|
}
|
||||||
|
assertTrue(catchNotServingException);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
||||||
|
assertTrue("The region should be present in online regions list.",
|
||||||
|
onlineRegions.contains(info));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testGetRegion() throws Exception {
|
||||||
|
AsyncHBaseAdmin rawAdmin = (AsyncHBaseAdmin) admin;
|
||||||
|
|
||||||
|
final TableName tableName = TableName.valueOf("testGetRegion");
|
||||||
|
LOG.info("Started " + tableName);
|
||||||
|
TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
|
||||||
|
|
||||||
|
try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
|
||||||
|
HRegionLocation regionLocation = locator.getRegionLocation(Bytes.toBytes("mmm"));
|
||||||
|
HRegionInfo region = regionLocation.getRegionInfo();
|
||||||
|
byte[] regionName = region.getRegionName();
|
||||||
|
Pair<HRegionInfo, ServerName> pair = rawAdmin.getRegion(regionName).get();
|
||||||
|
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
|
||||||
|
pair = rawAdmin.getRegion(region.getEncodedNameAsBytes()).get();
|
||||||
|
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -17,11 +17,9 @@
|
||||||
*/
|
*/
|
||||||
package org.apache.hadoop.hbase.client;
|
package org.apache.hadoop.hbase.client;
|
||||||
|
|
||||||
import static org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY;
|
|
||||||
import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
|
|
||||||
import static org.junit.Assert.assertEquals;
|
import static org.junit.Assert.assertEquals;
|
||||||
import static org.junit.Assert.assertTrue;
|
|
||||||
import static org.junit.Assert.assertFalse;
|
import static org.junit.Assert.assertFalse;
|
||||||
|
import static org.junit.Assert.assertTrue;
|
||||||
import static org.junit.Assert.fail;
|
import static org.junit.Assert.fail;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
@ -36,12 +34,8 @@ import java.util.Set;
|
||||||
import java.util.concurrent.CompletionException;
|
import java.util.concurrent.CompletionException;
|
||||||
import java.util.regex.Pattern;
|
import java.util.regex.Pattern;
|
||||||
|
|
||||||
import org.apache.commons.io.IOUtils;
|
|
||||||
import org.apache.commons.logging.Log;
|
|
||||||
import org.apache.commons.logging.LogFactory;
|
|
||||||
import org.apache.hadoop.fs.Path;
|
import org.apache.hadoop.fs.Path;
|
||||||
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
|
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
|
||||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
|
||||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||||
import org.apache.hadoop.hbase.HConstants;
|
import org.apache.hadoop.hbase.HConstants;
|
||||||
import org.apache.hadoop.hbase.HRegionInfo;
|
import org.apache.hadoop.hbase.HRegionInfo;
|
||||||
|
@ -51,63 +45,26 @@ import org.apache.hadoop.hbase.ServerName;
|
||||||
import org.apache.hadoop.hbase.TableName;
|
import org.apache.hadoop.hbase.TableName;
|
||||||
import org.apache.hadoop.hbase.TableNotEnabledException;
|
import org.apache.hadoop.hbase.TableNotEnabledException;
|
||||||
import org.apache.hadoop.hbase.master.MasterFileSystem;
|
import org.apache.hadoop.hbase.master.MasterFileSystem;
|
||||||
import org.apache.hadoop.hbase.regionserver.HRegionServer;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
|
|
||||||
import org.apache.hadoop.hbase.testclassification.ClientTests;
|
import org.apache.hadoop.hbase.testclassification.ClientTests;
|
||||||
import org.apache.hadoop.hbase.testclassification.LargeTests;
|
import org.apache.hadoop.hbase.testclassification.LargeTests;
|
||||||
import org.apache.hadoop.hbase.util.Bytes;
|
import org.apache.hadoop.hbase.util.Bytes;
|
||||||
import org.apache.hadoop.hbase.util.FSTableDescriptors;
|
import org.apache.hadoop.hbase.util.FSTableDescriptors;
|
||||||
import org.apache.hadoop.hbase.util.FSUtils;
|
import org.apache.hadoop.hbase.util.FSUtils;
|
||||||
import org.apache.hadoop.hbase.util.Pair;
|
|
||||||
import org.junit.AfterClass;
|
|
||||||
import org.junit.Assert;
|
import org.junit.Assert;
|
||||||
import org.junit.Before;
|
|
||||||
import org.junit.BeforeClass;
|
|
||||||
import org.junit.Rule;
|
import org.junit.Rule;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.junit.experimental.categories.Category;
|
import org.junit.experimental.categories.Category;
|
||||||
import org.junit.rules.TestName;
|
import org.junit.rules.TestName;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Class to test AsyncAdmin.
|
* Class to test asynchronous table admin operations.
|
||||||
*/
|
*/
|
||||||
@Category({LargeTests.class, ClientTests.class})
|
@Category({LargeTests.class, ClientTests.class})
|
||||||
public class TestAsyncAdmin {
|
public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
|
||||||
|
|
||||||
private static final Log LOG = LogFactory.getLog(TestAdmin1.class);
|
|
||||||
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
|
|
||||||
private static byte[] FAMILY = Bytes.toBytes("testFamily");
|
|
||||||
private static final byte[] FAMILY_0 = Bytes.toBytes("cf0");
|
|
||||||
private static final byte[] FAMILY_1 = Bytes.toBytes("cf1");
|
|
||||||
|
|
||||||
private static AsyncConnection ASYNC_CONN;
|
|
||||||
private AsyncAdmin admin;
|
|
||||||
|
|
||||||
@Rule
|
@Rule
|
||||||
public TestName name = new TestName();
|
public TestName name = new TestName();
|
||||||
|
|
||||||
@BeforeClass
|
|
||||||
public static void setUpBeforeClass() throws Exception {
|
|
||||||
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_PAUSE, 10);
|
|
||||||
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
|
|
||||||
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1000);
|
|
||||||
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 3000);
|
|
||||||
TEST_UTIL.getConfiguration().setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
|
|
||||||
TEST_UTIL.startMiniCluster(1);
|
|
||||||
ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
|
|
||||||
}
|
|
||||||
|
|
||||||
@AfterClass
|
|
||||||
public static void tearDownAfterClass() throws Exception {
|
|
||||||
IOUtils.closeQuietly(ASYNC_CONN);
|
|
||||||
TEST_UTIL.shutdownMiniCluster();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Before
|
|
||||||
public void setUp() throws Exception {
|
|
||||||
this.admin = ASYNC_CONN.getAdmin();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testTableExist() throws Exception {
|
public void testTableExist() throws Exception {
|
||||||
final TableName tableName = TableName.valueOf(name.getMethodName());
|
final TableName tableName = TableName.valueOf(name.getMethodName());
|
||||||
|
@ -200,7 +157,7 @@ public class TestAsyncAdmin {
|
||||||
|
|
||||||
private TableState.State getStateFromMeta(TableName table) throws Exception {
|
private TableState.State getStateFromMeta(TableName table) throws Exception {
|
||||||
Optional<TableState> state = AsyncMetaTableAccessor.getTableState(
|
Optional<TableState> state = AsyncMetaTableAccessor.getTableState(
|
||||||
ASYNC_CONN.getRawTable(META_TABLE_NAME), table).get();
|
ASYNC_CONN.getRawTable(TableName.META_TABLE_NAME), table).get();
|
||||||
assertTrue(state.isPresent());
|
assertTrue(state.isPresent());
|
||||||
return state.get().getState();
|
return state.get().getState();
|
||||||
}
|
}
|
||||||
|
@ -878,210 +835,4 @@ public class TestAsyncAdmin {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test(timeout = 30000)
|
|
||||||
public void testBalancer() throws Exception {
|
|
||||||
boolean initialState = admin.isBalancerEnabled().get();
|
|
||||||
|
|
||||||
// Start the balancer, wait for it.
|
|
||||||
boolean prevState = admin.setBalancerRunning(!initialState).get();
|
|
||||||
|
|
||||||
// The previous state should be the original state we observed
|
|
||||||
assertEquals(initialState, prevState);
|
|
||||||
|
|
||||||
// Current state should be opposite of the original
|
|
||||||
assertEquals(!initialState, admin.isBalancerEnabled().get());
|
|
||||||
|
|
||||||
// Reset it back to what it was
|
|
||||||
prevState = admin.setBalancerRunning(initialState).get();
|
|
||||||
|
|
||||||
// The previous state should be the opposite of the initial state
|
|
||||||
assertEquals(!initialState, prevState);
|
|
||||||
// Current state should be the original state again
|
|
||||||
assertEquals(initialState, admin.isBalancerEnabled().get());
|
|
||||||
}
|
|
||||||
|
|
||||||
private void createTableWithDefaultConf(TableName TABLENAME) throws Exception {
|
|
||||||
HTableDescriptor htd = new HTableDescriptor(TABLENAME);
|
|
||||||
HColumnDescriptor hcd = new HColumnDescriptor("value");
|
|
||||||
htd.addFamily(hcd);
|
|
||||||
|
|
||||||
admin.createTable(htd, null).get();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testCloseRegion() throws Exception {
|
|
||||||
TableName TABLENAME = TableName.valueOf("TestHBACloseRegion");
|
|
||||||
createTableWithDefaultConf(TABLENAME);
|
|
||||||
|
|
||||||
HRegionInfo info = null;
|
|
||||||
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME);
|
|
||||||
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
|
||||||
for (HRegionInfo regionInfo : onlineRegions) {
|
|
||||||
if (!regionInfo.getTable().isSystemTable()) {
|
|
||||||
info = regionInfo;
|
|
||||||
boolean closed = admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(),
|
|
||||||
rs.getServerName().getServerName()).get();
|
|
||||||
assertTrue(closed);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
boolean isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
|
|
||||||
long timeout = System.currentTimeMillis() + 10000;
|
|
||||||
while ((System.currentTimeMillis() < timeout) && (isInList)) {
|
|
||||||
Thread.sleep(100);
|
|
||||||
isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
|
|
||||||
}
|
|
||||||
|
|
||||||
assertFalse("The region should not be present in online regions list.", isInList);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception {
|
|
||||||
final String name = "TestHBACloseRegion1";
|
|
||||||
byte[] TABLENAME = Bytes.toBytes(name);
|
|
||||||
createTableWithDefaultConf(TableName.valueOf(TABLENAME));
|
|
||||||
|
|
||||||
HRegionInfo info = null;
|
|
||||||
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));
|
|
||||||
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
|
||||||
for (HRegionInfo regionInfo : onlineRegions) {
|
|
||||||
if (!regionInfo.isMetaTable()) {
|
|
||||||
if (regionInfo.getRegionNameAsString().contains(name)) {
|
|
||||||
info = regionInfo;
|
|
||||||
boolean catchNotServingException = false;
|
|
||||||
try {
|
|
||||||
admin.closeRegionWithEncodedRegionName("sample", rs.getServerName().getServerName())
|
|
||||||
.get();
|
|
||||||
} catch (Exception e) {
|
|
||||||
catchNotServingException = true;
|
|
||||||
// expected, ignore it
|
|
||||||
}
|
|
||||||
assertTrue(catchNotServingException);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
|
||||||
assertTrue("The region should be present in online regions list.",
|
|
||||||
onlineRegions.contains(info));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testCloseRegionThatFetchesTheHRIFromMeta() throws Exception {
|
|
||||||
TableName TABLENAME = TableName.valueOf("TestHBACloseRegion2");
|
|
||||||
createTableWithDefaultConf(TABLENAME);
|
|
||||||
|
|
||||||
HRegionInfo info = null;
|
|
||||||
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME);
|
|
||||||
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
|
||||||
for (HRegionInfo regionInfo : onlineRegions) {
|
|
||||||
if (!regionInfo.isMetaTable()) {
|
|
||||||
|
|
||||||
if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion2")) {
|
|
||||||
info = regionInfo;
|
|
||||||
admin.closeRegion(regionInfo.getRegionNameAsString(), rs.getServerName().getServerName())
|
|
||||||
.get();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
boolean isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
|
|
||||||
long timeout = System.currentTimeMillis() + 10000;
|
|
||||||
while ((System.currentTimeMillis() < timeout) && (isInList)) {
|
|
||||||
Thread.sleep(100);
|
|
||||||
isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
|
|
||||||
}
|
|
||||||
|
|
||||||
assertFalse("The region should not be present in online regions list.", isInList);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testCloseRegionWhenServerNameIsNull() throws Exception {
|
|
||||||
byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion3");
|
|
||||||
createTableWithDefaultConf(TableName.valueOf(TABLENAME));
|
|
||||||
|
|
||||||
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));
|
|
||||||
|
|
||||||
try {
|
|
||||||
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
|
||||||
for (HRegionInfo regionInfo : onlineRegions) {
|
|
||||||
if (!regionInfo.isMetaTable()) {
|
|
||||||
if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion3")) {
|
|
||||||
admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), null).get();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fail("The test should throw exception if the servername passed is null.");
|
|
||||||
} catch (IllegalArgumentException e) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testCloseRegionWhenServerNameIsEmpty() throws Exception {
|
|
||||||
byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegionWhenServerNameIsEmpty");
|
|
||||||
createTableWithDefaultConf(TableName.valueOf(TABLENAME));
|
|
||||||
|
|
||||||
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));
|
|
||||||
|
|
||||||
try {
|
|
||||||
List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
|
|
||||||
for (HRegionInfo regionInfo : onlineRegions) {
|
|
||||||
if (!regionInfo.isMetaTable()) {
|
|
||||||
if (regionInfo.getRegionNameAsString()
|
|
||||||
.contains("TestHBACloseRegionWhenServerNameIsEmpty")) {
|
|
||||||
admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), " ").get();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fail("The test should throw exception if the servername passed is empty.");
|
|
||||||
} catch (IllegalArgumentException e) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
public void testCloseRegionWhenEncodedRegionNameIsNotGiven() throws Exception {
  byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion4");
  createTableWithDefaultConf(TableName.valueOf(TABLENAME));

  HRegionInfo info = null;
  HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME));

  List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
  for (HRegionInfo regionInfo : onlineRegions) {
    if (!regionInfo.isMetaTable()) {
      if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion4")) {
        // Remember the region so we can verify later that it was NOT closed.
        info = regionInfo;
        boolean catchNotServingException = false;
        try {
          // Deliberately pass the FULL region name to an API that expects the
          // ENCODED region name; the close attempt is expected to fail.
          admin.closeRegionWithEncodedRegionName(regionInfo.getRegionNameAsString(),
            rs.getServerName().getServerName()).get();
        } catch (Exception e) {
          // expected, ignore it.
          catchNotServingException = true;
        }
        assertTrue(catchNotServingException);
      }
    }
  }
  // The bad close request must have had no effect: the region is still online.
  onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
  assertTrue("The region should be present in online regions list.",
    onlineRegions.contains(info));
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetRegion() throws Exception {
|
|
||||||
AsyncHBaseAdmin rawAdmin = (AsyncHBaseAdmin) admin;
|
|
||||||
|
|
||||||
final TableName tableName = TableName.valueOf("testGetRegion");
|
|
||||||
LOG.info("Started " + tableName);
|
|
||||||
TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
|
|
||||||
|
|
||||||
try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
|
|
||||||
HRegionLocation regionLocation = locator.getRegionLocation(Bytes.toBytes("mmm"));
|
|
||||||
HRegionInfo region = regionLocation.getRegionInfo();
|
|
||||||
byte[] regionName = region.getRegionName();
|
|
||||||
Pair<HRegionInfo, ServerName> pair = rawAdmin.getRegion(regionName).get();
|
|
||||||
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
|
|
||||||
pair = rawAdmin.getRegion(region.getEncodedNameAsBytes()).get();
|
|
||||||
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
Loading…
Reference in New Issue