diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index dfcb1b9ff21..1c6c3768fb0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -257,7 +257,8 @@ public interface Admin extends Abortable, Closeable { * @return the result of the async creation. You can use Future.get(long, TimeUnit) * to wait on the operation to complete. */ - Future<Void> createTableAsync(final HTableDescriptor desc, final byte[][] splitKeys) throws IOException; + Future<Void> createTableAsync(final HTableDescriptor desc, final byte[][] splitKeys) + throws IOException; /** * Deletes a table. Synchronous operation. @@ -1016,6 +1017,33 @@ public interface Admin extends Abortable, Closeable { HTableDescriptor[] getTableDescriptors(List<String> names) throws IOException; + /** + * Abort a procedure. + * @param procId ID of the procedure to abort + * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? + * @return true if aborted, false if the procedure already completed or does not exist + * @throws IOException + */ + boolean abortProcedure( + final long procId, + final boolean mayInterruptIfRunning) throws IOException; + + /** + * Abort a procedure, but do not block and wait for it to be completely removed. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * It may throw ExecutionException if there was an error while executing the operation, + * or TimeoutException if the wait timeout was not long enough to allow the + * operation to complete. + * + * @param procId ID of the procedure to abort + * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? + * @return true if aborted, false if the procedure already completed or does not exist + * @throws IOException + */ + Future<Boolean> abortProcedureAsync( + final long procId, + final boolean mayInterruptIfRunning) throws IOException; + /** * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
* diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index a0d9955845a..2262a0f29d4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1295,8 +1295,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable { if (isDeadServer(sn)) { throw new RegionServerStoppedException(sn + " is dead."); } - String key = getStubKey(ClientProtos.ClientService.BlockingInterface.class.getName(), sn.getHostname(), - sn.getPort()); + String key = getStubKey( + ClientProtos.ClientService.BlockingInterface.class.getName(), sn.getHostname(), + sn.getPort()); this.connectionLock.putIfAbsent(key, key); ClientProtos.ClientService.BlockingInterface stub = null; synchronized (this.connectionLock.get(key)) { @@ -1409,9 +1410,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable { final MasterProtos.MasterService.BlockingInterface stub = this.masterServiceState.stub; return new MasterKeepAliveConnection() { MasterServiceState mss = masterServiceState; + @Override - public MasterProtos.AddColumnResponse addColumn(RpcController controller, MasterProtos.AddColumnRequest request) - throws ServiceException { + public MasterProtos.AbortProcedureResponse abortProcedure( + RpcController controller, + MasterProtos.AbortProcedureRequest request) throws ServiceException { + return stub.abortProcedure(controller, request); + } + + @Override + public MasterProtos.AddColumnResponse addColumn( + RpcController controller, + MasterProtos.AddColumnRequest request) throws ServiceException { return stub.addColumn(controller, request); } @@ -1629,24 +1639,28 @@ class ConnectionImplementation implements ClusterConnection, Closeable { @Override public MasterProtos.CreateNamespaceResponse createNamespace( - RpcController controller, MasterProtos.CreateNamespaceRequest request) throws ServiceException { + RpcController controller, + MasterProtos.CreateNamespaceRequest request) throws ServiceException { return stub.createNamespace(controller, request); } @Override public MasterProtos.DeleteNamespaceResponse deleteNamespace( - RpcController controller, MasterProtos.DeleteNamespaceRequest request) throws ServiceException { + RpcController controller, + MasterProtos.DeleteNamespaceRequest request) throws ServiceException { return stub.deleteNamespace(controller, request); } @Override - public MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller, + public MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( + RpcController controller, MasterProtos.GetNamespaceDescriptorRequest request) throws ServiceException { return stub.getNamespaceDescriptor(controller, request); } @Override - public MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller, + public MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( + RpcController controller, MasterProtos.ListNamespaceDescriptorsRequest request) throws ServiceException { return stub.listNamespaceDescriptors(controller, request); } @@ -2100,7 +2114,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable { * point, which would be the case if all of its consumers close the * connection. 
However, on the off chance that someone is unable to close * the connection, perhaps because it bailed out prematurely, the method - * below will ensure that this {@link org.apache.hadoop.hbase.client.HConnection} instance is cleaned up. + * below will ensure that this {@link org.apache.hadoop.hbase.client.HConnection} instance + * is cleaned up. * Caveat: The JVM may take an unknown amount of time to call finalize on an * unreachable object, so our hope is that every consumer cleans up after * itself, like any good citizen. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 84c9c4909a6..2268d3eb7c2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -88,6 +88,8 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescripti import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest; @@ -279,6 +281,86 @@ public class HBaseAdmin implements Admin { return this.aborted; } + /** + * Abort a procedure. + * @param procId ID of the procedure to abort + * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? + * @return true if aborted, false if the procedure already completed or does not exist + * @throws IOException + */ + @Override + public boolean abortProcedure( + final long procId, + final boolean mayInterruptIfRunning) throws IOException { + Future<Boolean> future = abortProcedureAsync(procId, mayInterruptIfRunning); + try { + return future.get(syncWaitTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting for procedure to be cancelled"); + } catch (TimeoutException e) { + throw new TimeoutIOException(e); + } catch (ExecutionException e) { + if (e.getCause() instanceof IOException) { + throw (IOException)e.getCause(); + } else { + throw new IOException(e.getCause()); + } + } + } + + /** + * Abort a procedure, but do not block and wait for it to be completely removed. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * It may throw ExecutionException if there was an error while executing the operation, + * or TimeoutException if the wait timeout was not long enough to allow the + * operation to complete. + * + * @param procId ID of the procedure to abort + * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
+ * @return true if aborted, false if the procedure already completed or does not exist + * @throws IOException + */ + @Override + public Future<Boolean> abortProcedureAsync( + final long procId, + final boolean mayInterruptIfRunning) throws IOException { + Boolean abortProcResponse = executeCallable( + new MasterCallable<AbortProcedureResponse>(getConnection()) { + @Override + public AbortProcedureResponse call(int callTimeout) throws ServiceException { + AbortProcedureRequest abortProcRequest = + AbortProcedureRequest.newBuilder().setProcId(procId).build(); + return master.abortProcedure(null, abortProcRequest); + } + }).getIsProcedureAborted(); + + AbortProcedureFuture abortProcFuture = + new AbortProcedureFuture(this, procId, abortProcResponse); + return abortProcFuture; + } + + private static class AbortProcedureFuture extends ProcedureFuture<Boolean> { + private boolean isAbortInProgress; + + public AbortProcedureFuture( + final HBaseAdmin admin, + final Long procId, + final Boolean abortProcResponse) { + super(admin, procId); + this.isAbortInProgress = abortProcResponse; + } + + @Override + public Boolean get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + if (!this.isAbortInProgress) { + return false; + } + super.get(timeout, unit); + return true; + } + } + /** @return HConnection used by this object. */ @Override public HConnection getConnection() { @@ -4257,6 +4339,7 @@ public class HBaseAdmin implements Admin { private ExecutionException exception = null; private boolean procResultFound = false; private boolean done = false; + private boolean cancelled = false; private V result = null; private final HBaseAdmin admin; @@ -4269,13 +4352,39 @@ public class HBaseAdmin implements Admin { @Override public boolean cancel(boolean mayInterruptIfRunning) { - throw new UnsupportedOperationException(); + AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder() + .setProcId(procId).setMayInterruptIfRunning(mayInterruptIfRunning).build(); + try { + cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted(); + if (cancelled) { + done = true; + } + } catch (IOException e) { + // Cancel threw an exception for some reason. At this point, we are not sure whether + // the cancel succeeded or failed, so we assume it failed and log a warning + // for debugging purposes. + LOG.warn( + "Cancelling the procedure with procId=" + procId + " threw exception " + e.getMessage(), + e); + cancelled = false; + } + return cancelled; } @Override public boolean isCancelled() { - // TODO: Abort not implemented yet - return false; + return cancelled; + } + + protected AbortProcedureResponse abortProcedureResult( + final AbortProcedureRequest request) throws IOException { + return admin.executeCallable(new MasterCallable<AbortProcedureResponse>( + admin.getConnection()) { + @Override + public AbortProcedureResponse call(int callTimeout) throws ServiceException { + return master.abortProcedure(null, request); + } + }); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 01e9a374813..db0fc9705b7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -696,9 +696,24 @@ public class ProcedureExecutor<TEnvironment> { * @return true if the procedure exist and has received the abort, otherwise false.
*/ public boolean abort(final long procId) { + return abort(procId, true); + } + + /** + * Send an abort notification to the specified procedure. + * Depending on the procedure implementation, the abort can be honored or ignored. + * @param procId the procedure to abort + * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? + * @return true if the procedure exists and has received the abort, otherwise false. + */ + public boolean abort(final long procId, final boolean mayInterruptIfRunning) { Procedure proc = procedures.get(procId); if (proc != null) { - return proc.abort(getEnvironment()); + if (!mayInterruptIfRunning && proc.wasExecuted()) { + return false; + } else { + return proc.abort(getEnvironment()); + } } return false; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java index 1a4845cf731..9346ae8564e 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -38,21 +38,18 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.After; import org.junit.Before; -import org.junit.Assert; import org.junit.Test; import org.junit.experimental.categories.Category; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; @Category({MasterTests.class, SmallTests.class}) public class TestProcedureRecovery { private static final Log LOG = LogFactory.getLog(TestProcedureRecovery.class); private static final int PROCEDURE_EXECUTOR_SLOTS = 1; - private static final Procedure NULL_PROC = null; private static TestProcEnv procEnv; private static ProcedureExecutor<TestProcEnv> procExecutor; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 583020eb283..9c6b3dfa0d9 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -47667,6 +47667,980 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:hbase.pb.GetProcedureResultResponse) } + public interface AbortProcedureRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 proc_id = 1; + /** + * required uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * required uint64 proc_id = 1; + */ + long getProcId(); + + // optional bool mayInterruptIfRunning = 2 [default = true]; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + boolean hasMayInterruptIfRunning(); + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + boolean getMayInterruptIfRunning(); + } + /** + * Protobuf type {@code hbase.pb.AbortProcedureRequest} + */ + public static final class AbortProcedureRequest extends + com.google.protobuf.GeneratedMessage + implements AbortProcedureRequestOrBuilder { + // Use AbortProcedureRequest.newBuilder() to construct.
+ private AbortProcedureRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AbortProcedureRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AbortProcedureRequest defaultInstance; + public static AbortProcedureRequest getDefaultInstance() { + return defaultInstance; + } + + public AbortProcedureRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AbortProcedureRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + mayInterruptIfRunning_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AbortProcedureRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AbortProcedureRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + // optional bool mayInterruptIfRunning = 2 [default = true]; + public static final int MAYINTERRUPTIFRUNNING_FIELD_NUMBER = 2; + 
private boolean mayInterruptIfRunning_; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean hasMayInterruptIfRunning() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean getMayInterruptIfRunning() { + return mayInterruptIfRunning_; + } + + private void initFields() { + procId_ = 0L; + mayInterruptIfRunning_ = true; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasProcId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, mayInterruptIfRunning_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, mayInterruptIfRunning_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && (hasMayInterruptIfRunning() == other.hasMayInterruptIfRunning()); + if (hasMayInterruptIfRunning()) { + result = result && (getMayInterruptIfRunning() + == other.getMayInterruptIfRunning()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + if (hasMayInterruptIfRunning()) { + hash = (37 * hash) + MAYINTERRUPTIFRUNNING_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getMayInterruptIfRunning()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AbortProcedureRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 
+ internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + mayInterruptIfRunning_ = true; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mayInterruptIfRunning_ = mayInterruptIfRunning_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + if (other.hasMayInterruptIfRunning()) { + setMayInterruptIfRunning(other.getMayInterruptIfRunning()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasProcId()) { + + return false; + } + return true; + } + + public 
Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 proc_id = 1; + private long procId_ ; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * required uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * required uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // optional bool mayInterruptIfRunning = 2 [default = true]; + private boolean mayInterruptIfRunning_ = true; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean hasMayInterruptIfRunning() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean getMayInterruptIfRunning() { + return mayInterruptIfRunning_; + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public Builder setMayInterruptIfRunning(boolean value) { + bitField0_ |= 0x00000002; + mayInterruptIfRunning_ = value; + onChanged(); + return this; + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public Builder clearMayInterruptIfRunning() { + bitField0_ = (bitField0_ & ~0x00000002); + mayInterruptIfRunning_ = true; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureRequest) + } + + static { + defaultInstance = new AbortProcedureRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureRequest) + } + + public interface AbortProcedureResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool is_procedure_aborted = 1; + /** + * required bool is_procedure_aborted = 1; + */ + boolean hasIsProcedureAborted(); + /** + * required bool is_procedure_aborted = 1; + */ + boolean getIsProcedureAborted(); + } + /** + * Protobuf type {@code hbase.pb.AbortProcedureResponse} + */ + public static final class AbortProcedureResponse extends + com.google.protobuf.GeneratedMessage + implements AbortProcedureResponseOrBuilder { + // Use AbortProcedureResponse.newBuilder() to construct. 
+ private AbortProcedureResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AbortProcedureResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AbortProcedureResponse defaultInstance; + public static AbortProcedureResponse getDefaultInstance() { + return defaultInstance; + } + + public AbortProcedureResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AbortProcedureResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + isProcedureAborted_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AbortProcedureResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AbortProcedureResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool is_procedure_aborted = 1; + public static final int IS_PROCEDURE_ABORTED_FIELD_NUMBER = 1; + private boolean isProcedureAborted_; + /** + * required bool is_procedure_aborted = 1; + */ + public boolean hasIsProcedureAborted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool is_procedure_aborted = 1; + */ + public boolean getIsProcedureAborted() { + return isProcedureAborted_; + } + + private void initFields() { + isProcedureAborted_ = false; + } + private byte memoizedIsInitialized = -1; + 
public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasIsProcedureAborted()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, isProcedureAborted_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, isProcedureAborted_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) obj; + + boolean result = true; + result = result && (hasIsProcedureAborted() == other.hasIsProcedureAborted()); + if (hasIsProcedureAborted()) { + result = result && (getIsProcedureAborted() + == other.getIsProcedureAborted()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasIsProcedureAborted()) { + hash = (37 * hash) + IS_PROCEDURE_ABORTED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsProcedureAborted()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AbortProcedureResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + 
super.clear(); + isProcedureAborted_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortProcedureResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.isProcedureAborted_ = isProcedureAborted_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()) return this; + if (other.hasIsProcedureAborted()) { + setIsProcedureAborted(other.getIsProcedureAborted()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasIsProcedureAborted()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool is_procedure_aborted = 1; + private boolean isProcedureAborted_ ; + /** + * required bool is_procedure_aborted = 1; + */ + public boolean hasIsProcedureAborted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool is_procedure_aborted = 1; + */ + public boolean getIsProcedureAborted() { + return isProcedureAborted_; + } + /** + * required bool is_procedure_aborted = 1; + */ + public Builder setIsProcedureAborted(boolean value) { + bitField0_ 
|= 0x00000001; + isProcedureAborted_ = value; + onChanged(); + return this; + } + /** + * required bool is_procedure_aborted = 1; + */ + public Builder clearIsProcedureAborted() { + bitField0_ = (bitField0_ & ~0x00000001); + isProcedureAborted_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AbortProcedureResponse) + } + + static { + defaultInstance = new AbortProcedureResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AbortProcedureResponse) + } + public interface SetQuotaRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -52613,6 +53587,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc AbortProcedure(.hbase.pb.AbortProcedureRequest) returns (.hbase.pb.AbortProcedureResponse); + * + *
+       ** Abort a procedure 
+       * 
+ */ + public abstract void abortProcedure( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -53018,6 +54004,14 @@ public final class MasterProtos { impl.getSecurityCapabilities(controller, request, done); } + @java.lang.Override + public void abortProcedure( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request, + com.google.protobuf.RpcCallback done) { + impl.abortProcedure(controller, request, done); + } + }; } @@ -53140,6 +54134,8 @@ public final class MasterProtos { return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); case 49: return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); + case 50: + return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -53254,6 +54250,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); case 49: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -53368,6 +54366,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); case 49: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -53999,6 +54999,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc AbortProcedure(.hbase.pb.AbortProcedureRequest) returns (.hbase.pb.AbortProcedureResponse); + * + *
+     ** Abort a procedure 
+     * 
+ */ + public abstract void abortProcedure( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -54271,6 +55283,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 50: + this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -54385,6 +55402,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); case 49: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -54499,6 +55518,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); case 49: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -55269,6 +56290,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance())); } + + public void abortProcedure( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(50), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -55526,6 +56562,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -56134,6 +57175,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse abortProcedure( + com.google.protobuf.RpcController 
controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(50), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -56579,6 +57632,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AbortProcedureRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AbortProcedureResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_SetQuotaRequest_descriptor; private static @@ -56773,141 +57836,146 @@ public final class MasterProtos { " \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(" + "\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.ForeignE" + "xceptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022", - "\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"\315\001\n\017SetQuotaR" + - "equest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030" + - "\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004" + - " \001(\0132\023.hbase.pb.TableName\022\022\n\nremove_all\030" + - "\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttl" + - "e\030\007 \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020Se" + - "tQuotaResponse\"J\n\037MajorCompactionTimesta" + - "mpRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" + - ".TableName\"U\n(MajorCompactionTimestampFo" + - "rRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.p", - "b.RegionSpecifier\"@\n MajorCompactionTime" + - "stampResponse\022\034\n\024compaction_timestamp\030\001 " + - "\002(\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034S" + - "ecurityCapabilitiesResponse\022G\n\014capabilit" + - "ies\030\001 \003(\01621.hbase.pb.SecurityCapabilitie" + - "sResponse.Capability\"\202\001\n\nCapability\022\031\n\025S" + - "IMPLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENT" + - "ICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUT" + - "HORIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\0042\301#\n\rMa" + - "sterService\022e\n\024GetSchemaAlterStatus\022%.hb", - "ase.pb.GetSchemaAlterStatusRequest\032&.hba" + - "se.pb.GetSchemaAlterStatusResponse\022b\n\023Ge" + - "tTableDescriptors\022$.hbase.pb.GetTableDes" + - "criptorsRequest\032%.hbase.pb.GetTableDescr" + - "iptorsResponse\022P\n\rGetTableNames\022\036.hbase." + - "pb.GetTableNamesRequest\032\037.hbase.pb.GetTa" + - "bleNamesResponse\022Y\n\020GetClusterStatus\022!.h" + - "base.pb.GetClusterStatusRequest\032\".hbase." 
+ - "pb.GetClusterStatusResponse\022V\n\017IsMasterR" + - "unning\022 .hbase.pb.IsMasterRunningRequest", - "\032!.hbase.pb.IsMasterRunningResponse\022D\n\tA" + - "ddColumn\022\032.hbase.pb.AddColumnRequest\032\033.h" + - "base.pb.AddColumnResponse\022M\n\014DeleteColum" + - "n\022\035.hbase.pb.DeleteColumnRequest\032\036.hbase" + - ".pb.DeleteColumnResponse\022M\n\014ModifyColumn" + - "\022\035.hbase.pb.ModifyColumnRequest\032\036.hbase." + - "pb.ModifyColumnResponse\022G\n\nMoveRegion\022\033." + - "hbase.pb.MoveRegionRequest\032\034.hbase.pb.Mo" + - "veRegionResponse\022k\n\026DispatchMergingRegio" + - "ns\022\'.hbase.pb.DispatchMergingRegionsRequ", - "est\032(.hbase.pb.DispatchMergingRegionsRes" + - "ponse\022M\n\014AssignRegion\022\035.hbase.pb.AssignR" + - "egionRequest\032\036.hbase.pb.AssignRegionResp" + - "onse\022S\n\016UnassignRegion\022\037.hbase.pb.Unassi" + - "gnRegionRequest\032 .hbase.pb.UnassignRegio" + - "nResponse\022P\n\rOfflineRegion\022\036.hbase.pb.Of" + - "flineRegionRequest\032\037.hbase.pb.OfflineReg" + - "ionResponse\022J\n\013DeleteTable\022\034.hbase.pb.De" + - "leteTableRequest\032\035.hbase.pb.DeleteTableR" + - "esponse\022P\n\rtruncateTable\022\036.hbase.pb.Trun", - "cateTableRequest\032\037.hbase.pb.TruncateTabl" + - "eResponse\022J\n\013EnableTable\022\034.hbase.pb.Enab" + - "leTableRequest\032\035.hbase.pb.EnableTableRes" + - "ponse\022M\n\014DisableTable\022\035.hbase.pb.Disable" + - "TableRequest\032\036.hbase.pb.DisableTableResp" + - "onse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTab" + - "leRequest\032\035.hbase.pb.ModifyTableResponse" + - "\022J\n\013CreateTable\022\034.hbase.pb.CreateTableRe" + - "quest\032\035.hbase.pb.CreateTableResponse\022A\n\010" + - "Shutdown\022\031.hbase.pb.ShutdownRequest\032\032.hb", - "ase.pb.ShutdownResponse\022G\n\nStopMaster\022\033." 
+ - "hbase.pb.StopMasterRequest\032\034.hbase.pb.St" + - "opMasterResponse\022>\n\007Balance\022\030.hbase.pb.B" + - "alanceRequest\032\031.hbase.pb.BalanceResponse" + - "\022_\n\022SetBalancerRunning\022#.hbase.pb.SetBal" + - "ancerRunningRequest\032$.hbase.pb.SetBalanc" + - "erRunningResponse\022\\\n\021IsBalancerEnabled\022\"" + - ".hbase.pb.IsBalancerEnabledRequest\032#.hba" + - "se.pb.IsBalancerEnabledResponse\022S\n\016RunCa" + - "talogScan\022\037.hbase.pb.RunCatalogScanReque", - "st\032 .hbase.pb.RunCatalogScanResponse\022e\n\024" + - "EnableCatalogJanitor\022%.hbase.pb.EnableCa" + - "talogJanitorRequest\032&.hbase.pb.EnableCat" + - "alogJanitorResponse\022n\n\027IsCatalogJanitorE" + - "nabled\022(.hbase.pb.IsCatalogJanitorEnable" + - "dRequest\032).hbase.pb.IsCatalogJanitorEnab" + - "ledResponse\022^\n\021ExecMasterService\022#.hbase" + - ".pb.CoprocessorServiceRequest\032$.hbase.pb" + - ".CoprocessorServiceResponse\022A\n\010Snapshot\022" + - "\031.hbase.pb.SnapshotRequest\032\032.hbase.pb.Sn", - "apshotResponse\022h\n\025GetCompletedSnapshots\022" + - "&.hbase.pb.GetCompletedSnapshotsRequest\032" + - "\'.hbase.pb.GetCompletedSnapshotsResponse" + - "\022S\n\016DeleteSnapshot\022\037.hbase.pb.DeleteSnap" + - "shotRequest\032 .hbase.pb.DeleteSnapshotRes" + - "ponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.IsSna" + - "pshotDoneRequest\032 .hbase.pb.IsSnapshotDo" + - "neResponse\022V\n\017RestoreSnapshot\022 .hbase.pb" + - ".RestoreSnapshotRequest\032!.hbase.pb.Resto" + - "reSnapshotResponse\022h\n\025IsRestoreSnapshotD", - "one\022&.hbase.pb.IsRestoreSnapshotDoneRequ" + - "est\032\'.hbase.pb.IsRestoreSnapshotDoneResp" + - "onse\022P\n\rExecProcedure\022\036.hbase.pb.ExecPro" + - "cedureRequest\032\037.hbase.pb.ExecProcedureRe" + - "sponse\022W\n\024ExecProcedureWithRet\022\036.hbase.p" + - "b.ExecProcedureRequest\032\037.hbase.pb.ExecPr" + - "ocedureResponse\022V\n\017IsProcedureDone\022 .hba" + - "se.pb.IsProcedureDoneRequest\032!.hbase.pb." + - "IsProcedureDoneResponse\022V\n\017ModifyNamespa" + - "ce\022 .hbase.pb.ModifyNamespaceRequest\032!.h", - "base.pb.ModifyNamespaceResponse\022V\n\017Creat" + - "eNamespace\022 .hbase.pb.CreateNamespaceReq" + - "uest\032!.hbase.pb.CreateNamespaceResponse\022" + - "V\n\017DeleteNamespace\022 .hbase.pb.DeleteName" + - "spaceRequest\032!.hbase.pb.DeleteNamespaceR" + - "esponse\022k\n\026GetNamespaceDescriptor\022\'.hbas" + - "e.pb.GetNamespaceDescriptorRequest\032(.hba" + - "se.pb.GetNamespaceDescriptorResponse\022q\n\030" + - "ListNamespaceDescriptors\022).hbase.pb.List" + - "NamespaceDescriptorsRequest\032*.hbase.pb.L", - "istNamespaceDescriptorsResponse\022\206\001\n\037List" + - "TableDescriptorsByNamespace\0220.hbase.pb.L" + - "istTableDescriptorsByNamespaceRequest\0321." + - "hbase.pb.ListTableDescriptorsByNamespace" + - "Response\022t\n\031ListTableNamesByNamespace\022*." 
+ - "hbase.pb.ListTableNamesByNamespaceReques" + - "t\032+.hbase.pb.ListTableNamesByNamespaceRe" + - "sponse\022P\n\rGetTableState\022\036.hbase.pb.GetTa" + - "bleStateRequest\032\037.hbase.pb.GetTableState" + - "Response\022A\n\010SetQuota\022\031.hbase.pb.SetQuota", - "Request\032\032.hbase.pb.SetQuotaResponse\022x\n\037g" + - "etLastMajorCompactionTimestamp\022).hbase.p" + - "b.MajorCompactionTimestampRequest\032*.hbas" + - "e.pb.MajorCompactionTimestampResponse\022\212\001" + - "\n(getLastMajorCompactionTimestampForRegi" + - "on\0222.hbase.pb.MajorCompactionTimestampFo" + - "rRegionRequest\032*.hbase.pb.MajorCompactio" + - "nTimestampResponse\022_\n\022getProcedureResult" + - "\022#.hbase.pb.GetProcedureResultRequest\032$." + - "hbase.pb.GetProcedureResultResponse\022h\n\027g", - "etSecurityCapabilities\022%.hbase.pb.Securi" + - "tyCapabilitiesRequest\032&.hbase.pb.Securit" + - "yCapabilitiesResponseBB\n*org.apache.hado" + - "op.hbase.protobuf.generatedB\014MasterProto" + - "sH\001\210\001\001\240\001\001" + "\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProce" + + "dureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInter" + + "ruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortProced" + + "ureResponse\022\034\n\024is_procedure_aborted\030\001 \002(" + + "\010\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_name\030\001 \001(\t" + + "\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022" + + "\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.TableName" + + "\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006" + + " \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.Thrott" + + "leRequest\"\022\n\020SetQuotaResponse\"J\n\037MajorCo", + "mpactionTimestampRequest\022\'\n\ntable_name\030\001" + + " \002(\0132\023.hbase.pb.TableName\"U\n(MajorCompac" + + "tionTimestampForRegionRequest\022)\n\006region\030" + + "\001 \002(\0132\031.hbase.pb.RegionSpecifier\"@\n Majo" + + "rCompactionTimestampResponse\022\034\n\024compacti" + + "on_timestamp\030\001 \002(\003\"\035\n\033SecurityCapabiliti" + + "esRequest\"\354\001\n\034SecurityCapabilitiesRespon" + + "se\022G\n\014capabilities\030\001 \003(\01621.hbase.pb.Secu" + + "rityCapabilitiesResponse.Capability\"\202\001\n\n" + + "Capability\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n", + "\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATIO" + + "N\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISI" + + "BILITY\020\0042\226$\n\rMasterService\022e\n\024GetSchemaA" + + "lterStatus\022%.hbase.pb.GetSchemaAlterStat" + + "usRequest\032&.hbase.pb.GetSchemaAlterStatu" + + "sResponse\022b\n\023GetTableDescriptors\022$.hbase" + + ".pb.GetTableDescriptorsRequest\032%.hbase.p" + + "b.GetTableDescriptorsResponse\022P\n\rGetTabl" + + "eNames\022\036.hbase.pb.GetTableNamesRequest\032\037" + + ".hbase.pb.GetTableNamesResponse\022Y\n\020GetCl", + "usterStatus\022!.hbase.pb.GetClusterStatusR" + + "equest\032\".hbase.pb.GetClusterStatusRespon" + + "se\022V\n\017IsMasterRunning\022 .hbase.pb.IsMaste" + + "rRunningRequest\032!.hbase.pb.IsMasterRunni" + + "ngResponse\022D\n\tAddColumn\022\032.hbase.pb.AddCo" + + "lumnRequest\032\033.hbase.pb.AddColumnResponse" + + "\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteColumn" + + "Request\032\036.hbase.pb.DeleteColumnResponse\022" + + "M\n\014ModifyColumn\022\035.hbase.pb.ModifyColumnR" + + "equest\032\036.hbase.pb.ModifyColumnResponse\022G", + "\n\nMoveRegion\022\033.hbase.pb.MoveRegionReques" + + 
"t\032\034.hbase.pb.MoveRegionResponse\022k\n\026Dispa" + + "tchMergingRegions\022\'.hbase.pb.DispatchMer" + + "gingRegionsRequest\032(.hbase.pb.DispatchMe" + + "rgingRegionsResponse\022M\n\014AssignRegion\022\035.h" + + "base.pb.AssignRegionRequest\032\036.hbase.pb.A" + + "ssignRegionResponse\022S\n\016UnassignRegion\022\037." + + "hbase.pb.UnassignRegionRequest\032 .hbase.p" + + "b.UnassignRegionResponse\022P\n\rOfflineRegio" + + "n\022\036.hbase.pb.OfflineRegionRequest\032\037.hbas", + "e.pb.OfflineRegionResponse\022J\n\013DeleteTabl" + + "e\022\034.hbase.pb.DeleteTableRequest\032\035.hbase." + + "pb.DeleteTableResponse\022P\n\rtruncateTable\022" + + "\036.hbase.pb.TruncateTableRequest\032\037.hbase." + + "pb.TruncateTableResponse\022J\n\013EnableTable\022" + + "\034.hbase.pb.EnableTableRequest\032\035.hbase.pb" + + ".EnableTableResponse\022M\n\014DisableTable\022\035.h" + + "base.pb.DisableTableRequest\032\036.hbase.pb.D" + + "isableTableResponse\022J\n\013ModifyTable\022\034.hba" + + "se.pb.ModifyTableRequest\032\035.hbase.pb.Modi", + "fyTableResponse\022J\n\013CreateTable\022\034.hbase.p" + + "b.CreateTableRequest\032\035.hbase.pb.CreateTa" + + "bleResponse\022A\n\010Shutdown\022\031.hbase.pb.Shutd" + + "ownRequest\032\032.hbase.pb.ShutdownResponse\022G" + + "\n\nStopMaster\022\033.hbase.pb.StopMasterReques" + + "t\032\034.hbase.pb.StopMasterResponse\022>\n\007Balan" + + "ce\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb." + + "BalanceResponse\022_\n\022SetBalancerRunning\022#." + + "hbase.pb.SetBalancerRunningRequest\032$.hba" + + "se.pb.SetBalancerRunningResponse\022\\\n\021IsBa", + "lancerEnabled\022\".hbase.pb.IsBalancerEnabl" + + "edRequest\032#.hbase.pb.IsBalancerEnabledRe" + + "sponse\022S\n\016RunCatalogScan\022\037.hbase.pb.RunC" + + "atalogScanRequest\032 .hbase.pb.RunCatalogS" + + "canResponse\022e\n\024EnableCatalogJanitor\022%.hb" + + "ase.pb.EnableCatalogJanitorRequest\032&.hba" + + "se.pb.EnableCatalogJanitorResponse\022n\n\027Is" + + "CatalogJanitorEnabled\022(.hbase.pb.IsCatal" + + "ogJanitorEnabledRequest\032).hbase.pb.IsCat" + + "alogJanitorEnabledResponse\022^\n\021ExecMaster", + "Service\022#.hbase.pb.CoprocessorServiceReq" + + "uest\032$.hbase.pb.CoprocessorServiceRespon" + + "se\022A\n\010Snapshot\022\031.hbase.pb.SnapshotReques" + + "t\032\032.hbase.pb.SnapshotResponse\022h\n\025GetComp" + + "letedSnapshots\022&.hbase.pb.GetCompletedSn" + + "apshotsRequest\032\'.hbase.pb.GetCompletedSn" + + "apshotsResponse\022S\n\016DeleteSnapshot\022\037.hbas" + + "e.pb.DeleteSnapshotRequest\032 .hbase.pb.De" + + "leteSnapshotResponse\022S\n\016IsSnapshotDone\022\037" + + ".hbase.pb.IsSnapshotDoneRequest\032 .hbase.", + "pb.IsSnapshotDoneResponse\022V\n\017RestoreSnap" + + "shot\022 .hbase.pb.RestoreSnapshotRequest\032!" + + ".hbase.pb.RestoreSnapshotResponse\022h\n\025IsR" + + "estoreSnapshotDone\022&.hbase.pb.IsRestoreS" + + "napshotDoneRequest\032\'.hbase.pb.IsRestoreS" + + "napshotDoneResponse\022P\n\rExecProcedure\022\036.h" + + "base.pb.ExecProcedureRequest\032\037.hbase.pb." + + "ExecProcedureResponse\022W\n\024ExecProcedureWi" + + "thRet\022\036.hbase.pb.ExecProcedureRequest\032\037." 
+ + "hbase.pb.ExecProcedureResponse\022V\n\017IsProc", + "edureDone\022 .hbase.pb.IsProcedureDoneRequ" + + "est\032!.hbase.pb.IsProcedureDoneResponse\022V" + + "\n\017ModifyNamespace\022 .hbase.pb.ModifyNames" + + "paceRequest\032!.hbase.pb.ModifyNamespaceRe" + + "sponse\022V\n\017CreateNamespace\022 .hbase.pb.Cre" + + "ateNamespaceRequest\032!.hbase.pb.CreateNam" + + "espaceResponse\022V\n\017DeleteNamespace\022 .hbas" + + "e.pb.DeleteNamespaceRequest\032!.hbase.pb.D" + + "eleteNamespaceResponse\022k\n\026GetNamespaceDe" + + "scriptor\022\'.hbase.pb.GetNamespaceDescript", + "orRequest\032(.hbase.pb.GetNamespaceDescrip" + + "torResponse\022q\n\030ListNamespaceDescriptors\022" + + ").hbase.pb.ListNamespaceDescriptorsReque" + + "st\032*.hbase.pb.ListNamespaceDescriptorsRe" + + "sponse\022\206\001\n\037ListTableDescriptorsByNamespa" + + "ce\0220.hbase.pb.ListTableDescriptorsByName" + + "spaceRequest\0321.hbase.pb.ListTableDescrip" + + "torsByNamespaceResponse\022t\n\031ListTableName" + + "sByNamespace\022*.hbase.pb.ListTableNamesBy" + + "NamespaceRequest\032+.hbase.pb.ListTableNam", + "esByNamespaceResponse\022P\n\rGetTableState\022\036" + + ".hbase.pb.GetTableStateRequest\032\037.hbase.p" + + "b.GetTableStateResponse\022A\n\010SetQuota\022\031.hb" + + "ase.pb.SetQuotaRequest\032\032.hbase.pb.SetQuo" + + "taResponse\022x\n\037getLastMajorCompactionTime" + + "stamp\022).hbase.pb.MajorCompactionTimestam" + + "pRequest\032*.hbase.pb.MajorCompactionTimes" + + "tampResponse\022\212\001\n(getLastMajorCompactionT" + + "imestampForRegion\0222.hbase.pb.MajorCompac" + + "tionTimestampForRegionRequest\032*.hbase.pb", + ".MajorCompactionTimestampResponse\022_\n\022get" + + "ProcedureResult\022#.hbase.pb.GetProcedureR" + + "esultRequest\032$.hbase.pb.GetProcedureResu" + + "ltResponse\022h\n\027getSecurityCapabilities\022%." 
+ + "hbase.pb.SecurityCapabilitiesRequest\032&.h" + + "base.pb.SecurityCapabilitiesResponse\022S\n\016" + + "AbortProcedure\022\037.hbase.pb.AbortProcedure" + + "Request\032 .hbase.pb.AbortProcedureRespons" + + "eBB\n*org.apache.hadoop.hbase.protobuf.ge" + + "neratedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -57442,44 +58510,56 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultResponse_descriptor, new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); - internal_static_hbase_pb_SetQuotaRequest_descriptor = + internal_static_hbase_pb_AbortProcedureRequest_descriptor = getDescriptor().getMessageTypes().get(88); + internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AbortProcedureRequest_descriptor, + new java.lang.String[] { "ProcId", "MayInterruptIfRunning", }); + internal_static_hbase_pb_AbortProcedureResponse_descriptor = + getDescriptor().getMessageTypes().get(89); + internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AbortProcedureResponse_descriptor, + new java.lang.String[] { "IsProcedureAborted", }); + internal_static_hbase_pb_SetQuotaRequest_descriptor = + getDescriptor().getMessageTypes().get(90); internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); internal_static_hbase_pb_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(89); + getDescriptor().getMessageTypes().get(91); internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(90); + getDescriptor().getMessageTypes().get(92); internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(91); + getDescriptor().getMessageTypes().get(93); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(92); + getDescriptor().getMessageTypes().get(94); internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor, new java.lang.String[] { "CompactionTimestamp", }); 
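// Note on the renumbering in this generated block: the two new AbortProcedure message types take descriptor indices 88 and 89, so every message type that previously followed GetProcedureResultResponse shifts down by two (SetQuotaRequest getMessageTypes().get(88) becomes get(90), and so on through SecurityCapabilitiesResponse, get(94) becoming get(96)).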
internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor = - getDescriptor().getMessageTypes().get(93); + getDescriptor().getMessageTypes().get(95); internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor = - getDescriptor().getMessageTypes().get(94); + getDescriptor().getMessageTypes().get(96); internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index a75ce793386..2cd0b5f0bd4 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -396,21 +396,21 @@ message IsMasterRunningResponse { } message ExecProcedureRequest { - required ProcedureDescription procedure = 1; + required ProcedureDescription procedure = 1; } message ExecProcedureResponse { - optional int64 expected_timeout = 1; - optional bytes return_data = 2; + optional int64 expected_timeout = 1; + optional bytes return_data = 2; } message IsProcedureDoneRequest { - optional ProcedureDescription procedure = 1; + optional ProcedureDescription procedure = 1; } message IsProcedureDoneResponse { - optional bool done = 1 [default = false]; - optional ProcedureDescription snapshot = 2; + optional bool done = 1 [default = false]; + optional ProcedureDescription snapshot = 2; } message GetProcedureResultRequest { @@ -431,6 +431,15 @@ message GetProcedureResultResponse { optional ForeignExceptionMessage exception = 5; } +message AbortProcedureRequest { + required uint64 proc_id = 1; + optional bool mayInterruptIfRunning = 2 [default = true]; +} + +message AbortProcedureResponse { + required bool is_procedure_aborted = 1; +} + message SetQuotaRequest { optional string user_name = 1; optional string user_group = 2; @@ -707,4 +716,8 @@ service MasterService { /** Returns the security capabilities in effect on the cluster */ rpc getSecurityCapabilities(SecurityCapabilitiesRequest) returns(SecurityCapabilitiesResponse); + + /** Abort a procedure */ + rpc AbortProcedure(AbortProcedureRequest) + returns(AbortProcedureResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index e5371617363..7d59c382f34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -388,7 +388,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // should we check encryption settings at master side, default true this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true); - this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this)); + this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this)); // preload table descriptor at startup this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true); @@ -2480,6 +2480,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return descriptors; } + @Override + public boolean abortProcedure(final long procId, final boolean 
mayInterruptIfRunning) { + return this.procedureExecutor.abort(procId, mayInterruptIfRunning); + } + @Override public List listTableDescriptorsByNamespace(String name) throws IOException { ensureNamespaceExists(name); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index d70c0dab0c7..85e3accc0ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -65,6 +65,8 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; @@ -1060,6 +1062,17 @@ public class MasterRpcServices extends RSRpcServices } } + @Override + public AbortProcedureResponse abortProcedure( + RpcController rpcController, + AbortProcedureRequest request) { + AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder(); + boolean abortResult = + master.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning()); + response.setIsProcedureAborted(abortResult); + return response.build(); + } + @Override public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c, ListNamespaceDescriptorsRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 626f8c89c60..e7f4f21914d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -320,6 +320,14 @@ public interface MasterServices extends Server { final long nonceGroup, final long nonce) throws IOException; + /** + * Abort a procedure. + * @param procId ID of the procedure + * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? 
+ * @return true if aborted, false if procedure already completed or does not exist + */ + public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning); + /** * Get a namespace descriptor by name * @param name name of namespace descriptor diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 5a95d618b5f..dc060715073 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -25,6 +25,8 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.List; +import java.util.Random; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; @@ -727,4 +729,13 @@ public class TestAdmin2 { // Current state should be the original state again assertEquals(initialState, admin.isBalancerEnabled()); } + + @Test(timeout = 30000) + public void testAbortProcedureFail() throws Exception { + Random randomGenerator = new Random(); + long procId = randomGenerator.nextLong(); + + boolean abortResult = admin.abortProcedure(procId, true); + assertFalse(abortResult); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 33a64a57bfe..c7fe187194b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -436,6 +436,11 @@ public class TestCatalogJanitor { return null; //To change body of implemented methods use File | Settings | File Templates. } + @Override + public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) { + return false; //To change body of implemented methods use File | Settings | File Templates. + } + @Override public List listTableDescriptorsByNamespace(String name) throws IOException { return null; //To change body of implemented methods use File | Settings | File Templates. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java new file mode 100644 index 00000000000..d304ecdb1d0 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.*; + +@Category({MasterTests.class, MediumTests.class}) +public class TestProcedureAdmin { + private static final Log LOG = LogFactory.getLog(TestProcedureAdmin.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private long nonceGroup = HConstants.NO_NONCE; + private long nonce = HConstants.NO_NONCE; + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + assertTrue("expected executor to be running", procExec.isRunning()); + + nonceGroup = + MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster()); + nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster()); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout=60000) + public void testAbortProcedureSuccess() throws Exception { + final TableName tableName = TableName.valueOf("testAbortProcedureSuccess"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + // Submit an abortable procedure + long procId = procExec.submitProcedure( + new DisableTableProcedure(procExec.getEnvironment(), tableName, false), nonceGroup, nonce); + + boolean abortResult = procExec.abort(procId, true); + assertTrue(abortResult); + + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + // Validate the disable table procedure was aborted successfully + MasterProcedureTestingUtility.validateTableIsEnabled( + 
UTIL.getHBaseCluster().getMaster(), + tableName); + } + + @Test(timeout=60000) + public void testAbortProcedureFailure() throws Exception { + final TableName tableName = TableName.valueOf("testAbortProcedureFailure"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); + UTIL.getHBaseAdmin().disableTable(tableName); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + // Submit an un-abortable procedure + long procId = procExec.submitProcedure( + new DeleteTableProcedure(procExec.getEnvironment(), tableName), nonceGroup, nonce); + + boolean abortResult = procExec.abort(procId, true); + assertFalse(abortResult); + + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + // Validate the delete table procedure was not aborted + MasterProcedureTestingUtility.validateTableDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f"); + } + + @Test(timeout=60000) + public void testAbortProcedureInterruptedNotAllowed() throws Exception { + final TableName tableName = TableName.valueOf("testAbortProcedureInterruptedNotAllowed"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + // Submit a procedure + long procId = procExec.submitProcedure( + new DisableTableProcedure(procExec.getEnvironment(), tableName, true), nonceGroup, nonce); + // Wait for one step to complete + ProcedureTestingUtility.waitProcedure(procExec, procId); + + // Set the mayInterruptIfRunning flag to false + boolean abortResult = procExec.abort(procId, false); + assertFalse(abortResult); + + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + // Validate the disable table procedure was not aborted + MasterProcedureTestingUtility.validateTableIsDisabled( + UTIL.getHBaseCluster().getMaster(), tableName); + } + + @Test(timeout=60000) + public void testAbortNonExistProcedure() throws Exception { + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + Random randomGenerator = new Random(); + long procId; + // Generate an id for a procedure that does not exist + do { + procId = randomGenerator.nextLong(); + } while (procExec.getResult(procId) != null); + + boolean abortResult = procExec.abort(procId, true); + assertFalse(abortResult); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +}
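The tests above drive ProcedureExecutor.abort() directly; a client request takes the same path through MasterRpcServices.abortProcedure to HMaster.abortProcedure and then the executor. Below is a minimal caller-side sketch, not part of this patch, assuming the patch is applied and a cluster is reachable from the default client Configuration; the procedure id read from args[0] is a placeholder for an id obtained from whatever submitted the procedure.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AbortProcedureExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder: the id of a previously submitted procedure.
    long procId = Long.parseLong(args[0]);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // mayInterruptIfRunning = true: request the abort even if the
      // procedure has already executed at least one step.
      boolean aborted = admin.abortProcedure(procId, true);
      System.out.println("Procedure " + procId
          + (aborted ? " was aborted" : " was not aborted (already completed or unknown)"));
    }
  }
}

As testAbortNonExistProcedure shows, an abort of an unknown or already-completed procedure returns false rather than throwing.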