From 67149d253b8e4f166f402b1b849e5774140e52df Mon Sep 17 00:00:00 2001 From: Matteo Bertozzi Date: Thu, 9 Apr 2015 21:01:20 +0100 Subject: [PATCH] HBASE-13204 Procedure v2 - client create/delete table sync --- .../hbase/client/ConnectionManager.java | 10 +- .../hadoop/hbase/client/HBaseAdmin.java | 611 ++++- .../hbase/client/TestProcedureFuture.java | 185 ++ .../protobuf/generated/MasterProtos.java | 2192 +++++++++++++++-- hbase-protocol/src/main/protobuf/Master.proto | 24 + .../apache/hadoop/hbase/master/HMaster.java | 12 +- .../hbase/master/MasterRpcServices.java | 51 +- .../hadoop/hbase/master/MasterServices.java | 4 +- .../procedure/DeleteTableProcedure.java | 1 + .../hbase/master/TestCatalogJanitor.java | 7 +- .../hadoop/hbase/util/TestHBaseFsck.java | 1 + 11 files changed, 2731 insertions(+), 367 deletions(-) create mode 100644 hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index c1e9644d0f1..21d0ce856c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -118,6 +118,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescripto import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; @@ -1906,6 +1908,12 @@ class ConnectionManager { return stub.isProcedureDone(controller, request); } + @Override + public GetProcedureResultResponse getProcedureResult(RpcController controller, + GetProcedureResultRequest request) throws ServiceException { + return stub.getProcedureResult(controller, request); + } + @Override public IsMasterRunningResponse isMasterRunning( RpcController controller, IsMasterRunningRequest request) @@ -1990,7 +1998,7 @@ class ConnectionManager { throws ServiceException { return stub.getClusterStatus(controller, request); } - + @Override public SetQuotaResponse setQuota(RpcController controller, SetQuotaRequest request) throws ServiceException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 30dc6cbb891..c2096be8845 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -30,6 +30,10 @@ import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import 
java.util.regex.Pattern; import org.apache.commons.logging.Log; @@ -63,6 +67,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel; @@ -90,10 +95,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest; @@ -102,6 +109,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResp import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; @@ -143,6 +152,7 @@ import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -186,6 +196,7 @@ public class HBaseAdmin implements Admin { // numRetries is for 'normal' stuff... Multiply by this factor when // want to wait a long time. 
private final int retryLongerMultiplier; + private final int syncWaitTimeout; private boolean aborted; private boolean cleanupConnectionOnClose = false; // close the connection in close() private boolean closed = false; @@ -244,6 +255,8 @@ public class HBaseAdmin implements Admin { "hbase.client.retries.longer.multiplier", 10); this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.syncWaitTimeout = this.conf.getInt( + "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf); } @@ -538,83 +551,23 @@ public class HBaseAdmin implements Admin { */ @Override public void createTable(final HTableDescriptor desc, byte [][] splitKeys) - throws IOException { + throws IOException { + Future<Void> future = createTableAsyncV2(desc, splitKeys); try { - createTableAsync(desc, splitKeys); - } catch (SocketTimeoutException ste) { - LOG.warn("Creating " + desc.getTableName() + " took too long", ste); - } - int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication(); - int prevRegCount = 0; - boolean doneWithMetaScan = false; - for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; - ++tries) { - if (!doneWithMetaScan) { - // Wait for new table to come on-line - final AtomicInteger actualRegCount = new AtomicInteger(0); - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { - @Override - public boolean processRow(Result rowResult) throws IOException { - RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult); - if (list == null) { - LOG.warn("No serialized HRegionInfo in " + rowResult); - return true; - } - HRegionLocation l = list.getRegionLocation(); - if (l == null) { - return true; - } - if (!l.getRegionInfo().getTable().equals(desc.getTableName())) { - return false; - } - if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true; - HRegionLocation[] locations = list.getRegionLocations(); - for (HRegionLocation location : locations) { - if (location == null) continue; - ServerName serverName = location.getServerName(); - // Make sure that regions are assigned to server - if (serverName != null && serverName.getHostAndPort() != null) { - actualRegCount.incrementAndGet(); - } - } - return true; - } - }; - MetaScanner.metaScan(connection, visitor, desc.getTableName()); - if (actualRegCount.get() < numRegs) { - if (tries == this.numRetries * this.retryLongerMultiplier - 1) { - throw new RegionOfflineException("Only " + actualRegCount.get() + - " of " + numRegs + " regions are online; retries exhausted."); - } - try { // Sleep - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when opening" + - " regions; " + actualRegCount.get() + " of " + numRegs + - " regions processed so far"); - } - if (actualRegCount.get() > prevRegCount) { // Making progress - prevRegCount = actualRegCount.get(); - tries = -1; - } - } else { - doneWithMetaScan = true; - tries = -1; - } - } else if (isTableEnabled(desc.getTableName())) { - return; + // TODO: how long should we wait? spin forever?
+ future.get(syncWaitTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting" + + " for table to be enabled"); + } catch (TimeoutException e) { + throw new TimeoutIOException(e); + } catch (ExecutionException e) { + if (e.getCause() instanceof IOException) { + throw (IOException)e.getCause(); } else { - try { // Sleep - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when waiting" + - " for table to be enabled; meta scan was done"); - } + throw new IOException(e.getCause()); } } - throw new TableNotEnabledException( - "Retries exhausted while still waiting for table: " - + desc.getTableName() + " to be enabled"); } /** @@ -634,22 +587,42 @@ public class HBaseAdmin implements Admin { * @throws IOException */ @Override - public void createTableAsync( - final HTableDescriptor desc, final byte [][] splitKeys) - throws IOException { - if(desc.getTableName() == null) { + public void createTableAsync(final HTableDescriptor desc, final byte [][] splitKeys) + throws IOException { + createTableAsyncV2(desc, splitKeys); + } + + /** + * Creates a new table but does not block and wait for it to come online. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * It may throw ExecutionException if there was an error while executing the operation + * or TimeoutException in case the wait timeout was not long enough to allow the + * operation to complete. + * + * @param desc table descriptor for table + * @param splitKeys keys to check if the table has been created with all split keys + * @throws IllegalArgumentException if the table name is bad, if the split keys + * contain duplicates, or if a split key is an empty byte array + * @throws IOException if a remote or network exception occurs + * @return the result of the async creation. You can use Future.get(long, TimeUnit) + * to wait on the operation to complete. + */
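For illustration, a minimal usage sketch of the caller-visible behavior after this change; the connection setup, table name "example" and column family "f" below are assumptions for the example, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
      desc.addFamily(new HColumnDescriptor("f"));
      // createTable() still blocks, but with this patch it waits on the
      // procedure result (when the master supports proc-v2) instead of
      // polling hbase:meta until the client retries are exhausted.
      admin.createTable(desc);
    }
  }
}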
+ // TODO: This should be called Async but it will break binary compatibility + private Future<Void> createTableAsyncV2(final HTableDescriptor desc, final byte[][] splitKeys) + throws IOException { + if (desc.getTableName() == null) { throw new IllegalArgumentException("TableName cannot be null"); } - if(splitKeys != null && splitKeys.length > 0) { + if (splitKeys != null && splitKeys.length > 0) { Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR); // Verify there are no duplicate split keys - byte [] lastKey = null; - for(byte [] splitKey : splitKeys) { + byte[] lastKey = null; + for (byte[] splitKey : splitKeys) { if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) { throw new IllegalArgumentException( "Empty split key must not be passed in the split keys."); } - if(lastKey != null && Bytes.equals(splitKey, lastKey)) { + if (lastKey != null && Bytes.equals(splitKey, lastKey)) { throw new IllegalArgumentException("All split keys must be unique, " + "found duplicate: " + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey)); @@ -658,14 +631,126 @@ public class HBaseAdmin implements Admin { } } - executeCallable(new MasterCallable<Void>(getConnection()) { + CreateTableResponse response = executeCallable( + new MasterCallable<CreateTableResponse>(getConnection()) { @Override - public Void call(int callTimeout) throws ServiceException { + public CreateTableResponse call(int callTimeout) throws ServiceException { CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys); - master.createTable(null, request); - return null; + return master.createTable(null, request); } }); + return new CreateTableFuture(this, desc, splitKeys, response); + } + + private static class CreateTableFuture extends ProcedureFuture<Void> { + private final HTableDescriptor desc; + private final byte[][] splitKeys; + + public CreateTableFuture(final HBaseAdmin admin, final HTableDescriptor desc, + final byte[][] splitKeys, final CreateTableResponse response) { + super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null); + this.splitKeys = splitKeys; + this.desc = desc; + } + + @Override + protected Void waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + waitForTableEnabled(deadlineTs); + waitForAllRegionsOnline(deadlineTs); + return null; + } + + @Override + protected Void postOperationResult(final Void result, final long deadlineTs) + throws IOException, TimeoutException { + LOG.info("Created " + desc.getTableName()); + return result; + } + + private void waitForTableEnabled(final long deadlineTs) + throws IOException, TimeoutException { + waitForState(deadlineTs, new WaitForStateCallable() { + @Override + public boolean checkState(int tries) throws IOException { + try { + if (getAdmin().isTableAvailable(desc.getTableName())) { + return true; + } + } catch (TableNotFoundException tnfe) { + LOG.debug("Table " + desc.getTableName() + " was not enabled, sleeping.
tries="+ tries); + } + return false; + } + + @Override + public void throwInterruptedException() throws InterruptedIOException { + throw new InterruptedIOException("Interrupted when waiting for table " + + desc.getTableName() + " to be enabled"); + } + + @Override + public void throwTimeoutException(long elapsedTime) throws TimeoutException { + throw new TimeoutException("Table " + desc.getTableName() + + " not enabled after " + elapsedTime + "msec"); + } + }); + } + + private void waitForAllRegionsOnline(final long deadlineTs) + throws IOException, TimeoutException { + final AtomicInteger actualRegCount = new AtomicInteger(0); + final MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + @Override + public boolean processRow(Result rowResult) throws IOException { + RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult); + if (list == null) { + LOG.warn("No serialized HRegionInfo in " + rowResult); + return true; + } + HRegionLocation l = list.getRegionLocation(); + if (l == null) { + return true; + } + if (!l.getRegionInfo().getTable().equals(desc.getTableName())) { + return false; + } + if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true; + HRegionLocation[] locations = list.getRegionLocations(); + for (HRegionLocation location : locations) { + if (location == null) continue; + ServerName serverName = location.getServerName(); + // Make sure that regions are assigned to server + if (serverName != null && serverName.getHostAndPort() != null) { + actualRegCount.incrementAndGet(); + } + } + return true; + } + }; + + int tries = 0; + IOException serverEx = null; + int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication(); + while (EnvironmentEdgeManager.currentTime() < deadlineTs) { + actualRegCount.set(0); + MetaScanner.metaScan(getAdmin().getConnection(), visitor, desc.getTableName()); + if (actualRegCount.get() == numRegs) { + // all the regions are online + return; + } + + try { + Thread.sleep(getAdmin().getPauseTime(tries++)); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when opening" + + " regions; " + actualRegCount.get() + " of " + numRegs + + " regions processed so far"); + } + } + throw new TimeoutException("Only " + actualRegCount.get() + + " of " + numRegs + " regions are online; retries exhausted."); + } } public void deleteTable(final String tableName) throws IOException { @@ -685,61 +770,93 @@ public class HBaseAdmin implements Admin { */ @Override public void deleteTable(final TableName tableName) throws IOException { - boolean tableExists = true; + Future future = deleteTableAsyncV2(tableName); + try { + future.get(syncWaitTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting for table to be deleted"); + } catch (TimeoutException e) { + throw new TimeoutIOException(e); + } catch (ExecutionException e) { + if (e.getCause() instanceof IOException) { + throw (IOException)e.getCause(); + } else { + throw new IOException(e.getCause()); + } + } + } - executeCallable(new MasterCallable(getConnection()) { + /** + * Deletes the table but does not block and wait for it be completely removed. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * It may throw ExecutionException if there was an error while executing the operation + * or TimeoutException in case the wait timeout was not long enough to allow the + * operation to complete. 
- executeCallable(new MasterCallable<Void>(getConnection()) { + /** + * Deletes the table but does not block and wait for it to be completely removed. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * It may throw ExecutionException if there was an error while executing the operation + * or TimeoutException in case the wait timeout was not long enough to allow the + * operation to complete. + * + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + * @return the result of the async delete. You can use Future.get(long, TimeUnit) + * to wait on the operation to complete. + */ + // TODO: This should be called Async but it will break binary compatibility + private Future<Void> deleteTableAsyncV2(final TableName tableName) throws IOException { + DeleteTableResponse response = executeCallable( + new MasterCallable<DeleteTableResponse>(getConnection()) { @Override - public Void call(int callTimeout) throws ServiceException { + public DeleteTableResponse call(int callTimeout) throws ServiceException { DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName); - master.deleteTable(null,req); - return null; + return master.deleteTable(null, req); } }); + return new DeleteTableFuture(this, tableName, response); + } - int failures = 0; - // Wait until all regions deleted - for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { - try { - // Find whether all regions are deleted. - List<HRegionLocation> regionLations = - MetaScanner.listTableRegionLocations(conf, connection, tableName); + private static class DeleteTableFuture extends ProcedureFuture<Void> { + private final TableName tableName; - // let us wait until hbase:meta table is updated and - // HMaster removes the table from its HTableDescriptors - if (regionLations == null || regionLations.size() == 0) { - HTableDescriptor htd = getTableDescriptorByTableName(tableName); - - if (htd == null) { - // table could not be found in master - we are done. - tableExists = false; - break; - } - } - } catch (IOException ex) { - failures++; - if(failures >= numRetries - 1) { // no more tries left - if (ex instanceof RemoteException) { - throw ((RemoteException) ex).unwrapRemoteException(); - } else { - throw ex; - } - } - } - try { - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when waiting" + - " for table to be deleted"); - } + public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName, + final DeleteTableResponse response) { + super(admin, (response != null && response.hasProcId()) ?
response.getProcId() : null); + this.tableName = tableName; } - if (tableExists) { - throw new IOException("Retries exhausted, it took too long to wait"+ - " for the table " + tableName + " to be deleted."); + @Override + protected Void waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + waitTableNotFound(deadlineTs); + return null; + } + + @Override + protected Void postOperationResult(final Void result, final long deadlineTs) + throws IOException, TimeoutException { + // Delete cached information to prevent clients from using old locations + getAdmin().getConnection().clearRegionCache(tableName); + LOG.info("Deleted " + tableName); + return result; + } + + private void waitTableNotFound(final long deadlineTs) + throws IOException, TimeoutException { + waitForState(deadlineTs, new WaitForStateCallable() { + @Override + public boolean checkState(int tries) throws IOException { + return !getAdmin().tableExists(tableName); + } + + @Override + public void throwInterruptedException() throws InterruptedIOException { + throw new InterruptedIOException("Interrupted when waiting for table to be deleted"); + } + + @Override + public void throwTimeoutException(long elapsedTime) throws TimeoutException { + throw new TimeoutException("Table " + tableName + " not yet deleted after " + + elapsedTime + "msec"); + } + }); } - // Delete cached information to prevent clients from using old locations - this.connection.clearRegionCache(tableName); - LOG.info("Deleted " + tableName); } /** @@ -3636,7 +3753,7 @@ public class HBaseAdmin implements Admin { } }); } - + /** * Apply the new quota settings. * @param quota the quota settings @@ -3800,4 +3917,236 @@ public class HBaseAdmin implements Admin { } }); } + + /** + * Future that waits on a procedure result. + * Returned by the async version of the Admin calls, + * and used internally by the sync calls to wait on the result of the procedure. + */ + @InterfaceAudience.Private + @InterfaceStability.Evolving + protected static class ProcedureFuture<V> implements Future<V> { + private ExecutionException exception = null; + private boolean procResultFound = false; + private boolean done = false; + private V result = null; + + private final HBaseAdmin admin; + private final Long procId; + + public ProcedureFuture(final HBaseAdmin admin, final Long procId) { + this.admin = admin; + this.procId = procId; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isCancelled() { + // TODO: Abort not implemented yet + return false; + } + + @Override + public V get() throws InterruptedException, ExecutionException { + // TODO: should we ever spin forever?
+ throw new UnsupportedOperationException(); + } + + @Override + public V get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + if (!done) { + long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout); + try { + try { + // if the master supports procedures, try to wait for the result + if (procId != null) { + result = waitProcedureResult(procId, deadlineTs); + } + // if we don't have a proc result, try the compatibility wait + if (!procResultFound) { + result = waitOperationResult(deadlineTs); + } + result = postOperationResult(result, deadlineTs); + done = true; + } catch (IOException e) { + result = postOperationFailure(e, deadlineTs); + done = true; + } + } catch (IOException e) { + exception = new ExecutionException(e); + done = true; + } + } + if (exception != null) { + throw exception; + } + return result; + } + + @Override + public boolean isDone() { + return done; + } + + protected HBaseAdmin getAdmin() { + return admin; + } + + private V waitProcedureResult(long procId, long deadlineTs) + throws IOException, TimeoutException, InterruptedException { + GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder() + .setProcId(procId) + .build(); + + int tries = 0; + IOException serviceEx = null; + while (EnvironmentEdgeManager.currentTime() < deadlineTs) { + GetProcedureResultResponse response = null; + try { + // Try to fetch the result + response = getProcedureResult(request); + } catch (IOException e) { + serviceEx = unwrapException(e); + + // the master may be down + LOG.warn("failed to get the procedure result, procId=" + procId, serviceEx); + + // Not much to do, if we have a DoNotRetryIOException + if (serviceEx instanceof DoNotRetryIOException) { + // TODO: looks like there is no way to unwrap this exception and get the proper + // UnsupportedOperationException aside from looking at the message. + // anyway, if we fail here we just fail over to the compatibility side + // and that is always a valid solution. + LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx); + procResultFound = false; + return null; + } + } + + // If the procedure is no longer running, we should have a result + if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) { + procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND; + return convertResult(response); + } + + try { + Thread.sleep(getAdmin().getPauseTime(tries++)); + } catch (InterruptedException e) { + throw new InterruptedException( + "Interrupted while waiting for the result of proc " + procId); + } + } + if (serviceEx != null) { + throw serviceEx; + } else { + throw new TimeoutException("The procedure " + procId + " is still running"); + } + } + + private static IOException unwrapException(IOException e) { + if (e instanceof RemoteException) { + return ((RemoteException)e).unwrapRemoteException(); + } + return e; + } + + protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request) + throws IOException { + return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>( + admin.getConnection()) { + @Override + public GetProcedureResultResponse call(int callTimeout) throws ServiceException { + return master.getProcedureResult(null, request); + } + }); + }
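To illustrate the convertResult()/postOperationResult() extension points, a hypothetical subclass (invented for this note, not taken from the patch) that would surface the raw serialized procedure result to callers of get():

private static class RawResultFuture extends ProcedureFuture<byte[]> {
  public RawResultFuture(final HBaseAdmin admin, final Long procId) {
    super(admin, procId);
  }

  @Override
  protected byte[] convertResult(final GetProcedureResultResponse response)
      throws IOException {
    if (response.hasException()) {
      // rebuild the remote failure as a local IOException, as the base class does
      throw ForeignExceptionUtil.toIOException(response.getException());
    }
    // hand the serialized procedure result back to the caller of get()
    return response.hasResult() ? response.getResult().toByteArray() : null;
  }
}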
+ + /** + * Convert the procedure result response to a specified type. + * @param response the procedure result object to parse + * @return the result data of the procedure. + */ + protected V convertResult(final GetProcedureResultResponse response) throws IOException { + if (response.hasException()) { + throw ForeignExceptionUtil.toIOException(response.getException()); + } + return null; + } + + /** + * Fallback implementation in case the procedure is not supported by the server. + * It should try to wait until the operation is completed. + * @param deadlineTs the timestamp after which this method should throw a TimeoutException + * @return the result data of the operation + */ + protected V waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + return null; + } + + /** + * Called after the operation is completed and the result is fetched. + * This hook allows performing extra steps after the procedure completes, + * and applying transformations to the result that will be returned by get(). + * @param result the result of the procedure + * @param deadlineTs the timestamp after which this method should throw a TimeoutException + * @return the result of the procedure, which may be the same as the passed one + */ + protected V postOperationResult(final V result, final long deadlineTs) + throws IOException, TimeoutException { + return result; + } + + /** + * Called after the operation terminates with a failure. + * This hook allows performing extra steps after the procedure fails, + * and applying transformations to the result that would be returned by get(). + * The default implementation rethrows the exception. + * @param exception the exception received while fetching the result + * @param deadlineTs the timestamp after which this method should throw a TimeoutException + * @return the result of the procedure, which may be the same as the passed one + */ + protected V postOperationFailure(final IOException exception, final long deadlineTs) + throws IOException, TimeoutException { + throw exception; + } + + protected interface WaitForStateCallable { + boolean checkState(int tries) throws IOException; + void throwInterruptedException() throws InterruptedIOException; + void throwTimeoutException(long elapsed) throws TimeoutException; + } + + protected void waitForState(final long deadlineTs, final WaitForStateCallable callable) + throws IOException, TimeoutException { + int tries = 0; + IOException serverEx = null; + long startTime = EnvironmentEdgeManager.currentTime(); + while (EnvironmentEdgeManager.currentTime() < deadlineTs) { + serverEx = null; + try { + if (callable.checkState(tries)) { + return; + } + } catch (IOException e) { + serverEx = e; + } + try { + Thread.sleep(getAdmin().getPauseTime(tries++)); + } catch (InterruptedException e) { + callable.throwInterruptedException(); + } + } + if (serverEx != null) { + throw unwrapException(serverEx); + } else { + callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime); + } + } + } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java new file mode 100644 index 00000000000..4bd6571e8be --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java @@ -0,0 +1,185 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; + +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.mockito.Mockito; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category(SmallTests.class) +public class TestProcedureFuture { + private static class TestFuture extends HBaseAdmin.ProcedureFuture<Void> { + private boolean postOperationResultCalled = false; + private boolean waitOperationResultCalled = false; + private boolean getProcedureResultCalled = false; + private boolean convertResultCalled = false; + + public TestFuture(final HBaseAdmin admin, final Long procId) { + super(admin, procId); + } + + public boolean wasPostOperationResultCalled() { + return postOperationResultCalled; + } + + public boolean wasWaitOperationResultCalled() { + return waitOperationResultCalled; + } + + public boolean wasGetProcedureResultCalled() { + return getProcedureResultCalled; + } + + public boolean wasConvertResultCalled() { + return convertResultCalled; + } + + @Override + protected GetProcedureResultResponse getProcedureResult( + final GetProcedureResultRequest request) throws IOException { + getProcedureResultCalled = true; + return GetProcedureResultResponse.newBuilder() + .setState(GetProcedureResultResponse.State.FINISHED) + .build(); + } + + @Override + protected Void convertResult(final GetProcedureResultResponse response) throws IOException { + convertResultCalled = true; + return null; + } + + @Override + protected Void waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + waitOperationResultCalled = true; + return null; + } + + @Override + protected Void postOperationResult(final Void result, final long deadlineTs) + throws IOException, TimeoutException { + postOperationResultCalled = true; + return result; + } + } + + /** + * When the master returns a result with a procId, + * the waitOperationResult() call is skipped, + * since the procedure result is fetched directly.
+ */ + @Test(timeout=60000) + public void testWithProcId() throws Exception { + HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); + TestFuture f = new TestFuture(admin, 100L); + f.get(1, TimeUnit.MINUTES); + + assertTrue("expected getProcedureResult() to be called", f.wasGetProcedureResultCalled()); + assertTrue("expected convertResult() to be called", f.wasConvertResultCalled()); + assertFalse("unexpected waitOperationResult() called", f.wasWaitOperationResultCalled()); + assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled()); + } + + /** + * Verify that the spin loop works while the procedure is still running. + */ + @Test(timeout=60000) + public void testWithProcIdAndSpinning() throws Exception { + final AtomicInteger spinCount = new AtomicInteger(0); + HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); + TestFuture f = new TestFuture(admin, 100L) { + @Override + protected GetProcedureResultResponse getProcedureResult( + final GetProcedureResultRequest request) throws IOException { + boolean done = spinCount.incrementAndGet() >= 10; + return GetProcedureResultResponse.newBuilder() + .setState(done ? GetProcedureResultResponse.State.FINISHED : + GetProcedureResultResponse.State.RUNNING) + .build(); + } + }; + f.get(1, TimeUnit.MINUTES); + + assertEquals(10, spinCount.get()); + assertTrue("expected convertResult() to be called", f.wasConvertResultCalled()); + assertFalse("unexpected waitOperationResult() called", f.wasWaitOperationResultCalled()); + assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled()); + } + + /** + * When the master returns a result without a procId, + * the getProcedureResult() call is skipped. + */ + @Test(timeout=60000) + public void testWithoutProcId() throws Exception { + HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); + TestFuture f = new TestFuture(admin, null); + f.get(1, TimeUnit.MINUTES); + + assertFalse("unexpected getProcedureResult() called", f.wasGetProcedureResultCalled()); + assertFalse("unexpected convertResult() called", f.wasConvertResultCalled()); + assertTrue("expected waitOperationResult() to be called", f.wasWaitOperationResultCalled()); + assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled()); + } + + /** + * When a new client with procedure support asks an old master without proc-support + * for the procedure result, we get a DoNotRetryIOException (wrapping an + * UnsupportedOperationException). The future should trap that and fall back to + * waitOperationResult(). + * + * This happens when the operation call lands on a "new master", but while we are waiting + * for the operation to complete, we fail over to an "old master".
+ */ + @Test(timeout=60000) + public void testOnServerWithNoProcedureSupport() throws Exception { + HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); + TestFuture f = new TestFuture(admin, 100L) { + @Override + protected GetProcedureResultResponse getProcedureResult( + final GetProcedureResultRequest request) throws IOException { + super.getProcedureResult(request); + throw new DoNotRetryIOException(new UnsupportedOperationException("getProcedureResult")); + } + }; + f.get(1, TimeUnit.MINUTES); + + assertTrue("expected getProcedureResult() to be called", f.wasGetProcedureResultCalled()); + assertFalse("unexpected convertResult() called", f.wasConvertResultCalled()); + assertTrue("expected waitOperationResult() to be called", f.wasWaitOperationResultCalled()); + assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled()); + } +} \ No newline at end of file diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 9ebd9d64d82..8e40ec07b42 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -9074,6 +9074,16 @@ public final class MasterProtos { public interface CreateTableResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); } /** * Protobuf type {@code CreateTableResponse} @@ -9108,6 +9118,7 @@ public final class MasterProtos { com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -9125,6 +9136,11 @@ public final class MasterProtos { } break; } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -9164,7 +9180,25 @@ public final class MasterProtos { return PARSER; } + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + private void initFields() { + procId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -9178,6 +9212,9 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } getUnknownFields().writeTo(output); } @@ -9187,6 +9224,10 @@ public final class MasterProtos { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -9210,6 +9251,11 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse) obj; boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -9223,6 +9269,10 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -9332,6 +9382,8 @@ public final class MasterProtos { public Builder clear() { super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -9358,6 +9410,13 @@ public final class MasterProtos { public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -9373,6 +9432,9 @@ public final class MasterProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -9398,6 +9460,40 @@ public final class MasterProtos { } return this; } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } // @@protoc_insertion_point(builder_scope:CreateTableResponse) } @@ -9973,6 +10069,16 @@ public final class MasterProtos { public interface DeleteTableResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); } /** * Protobuf type {@code DeleteTableResponse} @@ -10007,6 +10113,7 @@ public final class MasterProtos { com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -10024,6 +10131,11 @@ public final class MasterProtos { } break; } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + 
break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -10063,7 +10175,25 @@ public final class MasterProtos { return PARSER; } + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + private void initFields() { + procId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -10077,6 +10207,9 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } getUnknownFields().writeTo(output); } @@ -10086,6 +10219,10 @@ public final class MasterProtos { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -10109,6 +10246,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse) obj; boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -10122,6 +10264,10 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -10231,6 +10377,8 @@ public final class MasterProtos { public Builder clear() { super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -10257,6 +10405,13 @@ public final class MasterProtos { public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -10272,6 +10427,9 @@ public final class MasterProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -10297,6 +10455,40 @@ public final class MasterProtos { } return this; } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + 
/** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } // @@protoc_insertion_point(builder_scope:DeleteTableResponse) } @@ -42255,6 +42447,1464 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:IsProcedureDoneResponse) } + public interface GetProcedureResultRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 proc_id = 1; + /** + * required uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * required uint64 proc_id = 1; + */ + long getProcId(); + } + /** + * Protobuf type {@code GetProcedureResultRequest} + */ + public static final class GetProcedureResultRequest extends + com.google.protobuf.GeneratedMessage + implements GetProcedureResultRequestOrBuilder { + // Use GetProcedureResultRequest.newBuilder() to construct. + private GetProcedureResultRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetProcedureResultRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetProcedureResultRequest defaultInstance; + public static GetProcedureResultRequest getDefaultInstance() { + return defaultInstance; + } + + public GetProcedureResultRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetProcedureResultRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetProcedureResultRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetProcedureResultRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + private void initFields() { + procId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasProcId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + com.google.protobuf.ByteString 
data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetProcedureResultRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasProcId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 proc_id = 1; + private long procId_ ; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * required uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * required uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:GetProcedureResultRequest) + } + + static { + defaultInstance = new GetProcedureResultRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetProcedureResultRequest) + } + + public interface GetProcedureResultResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .GetProcedureResultResponse.State state = 1; + /** + * required .GetProcedureResultResponse.State state = 1; + */ + boolean hasState(); + /** + * required .GetProcedureResultResponse.State state = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState(); + + // optional uint64 start_time = 2; + /** + * optional uint64 start_time = 2; + */ + boolean hasStartTime(); + /** + * optional uint64 start_time = 2; + */ + long getStartTime(); + + // optional uint64 last_update = 3; + /** + * optional uint64 last_update = 3; + */ + boolean hasLastUpdate(); + /** + * optional uint64 last_update = 3; + */ + long getLastUpdate(); + + // optional bytes result = 4; + /** + * optional bytes result = 4; + */ + boolean hasResult(); + /** + * optional bytes result = 4; + */ + com.google.protobuf.ByteString getResult(); + + // optional .ForeignExceptionMessage exception = 5; + /** + * optional .ForeignExceptionMessage exception = 5; + */ + boolean hasException(); + /** + * optional .ForeignExceptionMessage exception = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException(); + /** + * optional .ForeignExceptionMessage exception = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder(); + } + /** + * Protobuf type {@code GetProcedureResultResponse} + */ + public static final class GetProcedureResultResponse extends + com.google.protobuf.GeneratedMessage + implements GetProcedureResultResponseOrBuilder { + // Use GetProcedureResultResponse.newBuilder() to construct. 
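// ---------------------------------------------------------------------------
// Note, not part of the patch: the *OrBuilder interface above is implemented
// by both the immutable message and its Builder, so helpers can read fields
// without forcing a build(). A minimal sketch with a hypothetical helper:
//
//   static boolean isFinished(GetProcedureResultResponseOrBuilder r) {
//     return r.hasState()
//         && r.getState() == GetProcedureResultResponse.State.FINISHED;
//   }
// ---------------------------------------------------------------------------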
+ private GetProcedureResultResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetProcedureResultResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetProcedureResultResponse defaultInstance; + public static GetProcedureResultResponse getDefaultInstance() { + return defaultInstance; + } + + public GetProcedureResultResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetProcedureResultResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + case 16: { + bitField0_ |= 0x00000002; + startTime_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + lastUpdate_ = input.readUInt64(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + result_ = input.readBytes(); + break; + } + case 42: { + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = exception_.toBuilder(); + } + exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(exception_); + exception_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetProcedureResultResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetProcedureResultResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code GetProcedureResultResponse.State} + */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + * NOT_FOUND = 0; + */ + NOT_FOUND(0, 0), + /** + * RUNNING = 1; + */ + RUNNING(1, 1), + /** + * FINISHED = 2; + */ + FINISHED(2, 2), + ; + + /** + * NOT_FOUND = 0; + */ + public static final int NOT_FOUND_VALUE = 0; + /** + * RUNNING = 1; + */ + public static final int RUNNING_VALUE = 1; + /** + * FINISHED = 2; + */ + public static final int FINISHED_VALUE = 2; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return NOT_FOUND; + case 1: return RUNNING; + case 2: return FINISHED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:GetProcedureResultResponse.State) + } + + private int bitField0_; + // required .GetProcedureResultResponse.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_; + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { + return state_; + } + + // optional uint64 start_time = 2; + public static final int START_TIME_FIELD_NUMBER = 2; + private long startTime_; + /** + * optional uint64 start_time = 2; + */ + public boolean hasStartTime() { + return 
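// ---------------------------------------------------------------------------
// Illustrative reading of the State enum above, not part of the patch:
// NOT_FOUND means the master has no record of the procedure id, RUNNING means
// the caller should poll again, and FINISHED means result and/or exception
// are populated. The error handling below is a hypothetical example.
//
//   switch (response.getState()) {
//     case NOT_FOUND: throw new IOException("procedure not found");
//     case RUNNING:   /* retry after a delay */           break;
//     case FINISHED:  /* inspect result or exception */   break;
//   }
// ---------------------------------------------------------------------------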
((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 start_time = 2; + */ + public long getStartTime() { + return startTime_; + } + + // optional uint64 last_update = 3; + public static final int LAST_UPDATE_FIELD_NUMBER = 3; + private long lastUpdate_; + /** + * optional uint64 last_update = 3; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 last_update = 3; + */ + public long getLastUpdate() { + return lastUpdate_; + } + + // optional bytes result = 4; + public static final int RESULT_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString result_; + /** + * optional bytes result = 4; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes result = 4; + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + + // optional .ForeignExceptionMessage exception = 5; + public static final int EXCEPTION_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_; + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public boolean hasException() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + return exception_; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + return exception_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + startTime_ = 0L; + lastUpdate_ = 0L; + result_ = com.google.protobuf.ByteString.EMPTY; + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, startTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, lastUpdate_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, result_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, exception_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, startTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + 
.computeUInt64Size(3, lastUpdate_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, result_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, exception_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) obj; + + boolean result = true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasStartTime() == other.hasStartTime()); + if (hasStartTime()) { + result = result && (getStartTime() + == other.getStartTime()); + } + result = result && (hasLastUpdate() == other.hasLastUpdate()); + if (hasLastUpdate()) { + result = result && (getLastUpdate() + == other.getLastUpdate()); + } + result = result && (hasResult() == other.hasResult()); + if (hasResult()) { + result = result && getResult() + .equals(other.getResult()); + } + result = result && (hasException() == other.hasException()); + if (hasException()) { + result = result && getException() + .equals(other.getException()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTime()); + } + if (hasLastUpdate()) { + hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastUpdate()); + } + if (hasResult()) { + hash = (37 * hash) + RESULT_FIELD_NUMBER; + hash = (53 * hash) + getResult().hashCode(); + } + if (hasException()) { + hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; + hash = (53 * hash) + getException().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetProcedureResultResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getExceptionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + bitField0_ = (bitField0_ & ~0x00000001); + startTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + lastUpdate_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + result_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.startTime_ = startTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.lastUpdate_ = lastUpdate_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.result_ = result_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (exceptionBuilder_ == null) { + result.exception_ = exception_; + } else { + result.exception_ = exceptionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public 
Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasStartTime()) { + setStartTime(other.getStartTime()); + } + if (other.hasLastUpdate()) { + setLastUpdate(other.getLastUpdate()); + } + if (other.hasResult()) { + setResult(other.getResult()); + } + if (other.hasException()) { + mergeException(other.getException()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .GetProcedureResultResponse.State state = 1; + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { + return state_; + } + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + onChanged(); + return this; + } + + // optional uint64 start_time = 2; + private long startTime_ ; + /** + * optional uint64 start_time = 2; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 start_time = 2; + */ + public long getStartTime() { + return startTime_; + } + /** + * optional uint64 start_time = 2; + */ + public Builder setStartTime(long value) { + bitField0_ |= 0x00000002; + startTime_ = value; + onChanged(); + return this; + } + /** + * optional uint64 start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = 0L; + onChanged(); + return this; + } + + // optional uint64 last_update = 3; + private long lastUpdate_ ; + /** + * optional uint64 last_update = 3; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000004) == 
0x00000004); + } + /** + * optional uint64 last_update = 3; + */ + public long getLastUpdate() { + return lastUpdate_; + } + /** + * optional uint64 last_update = 3; + */ + public Builder setLastUpdate(long value) { + bitField0_ |= 0x00000004; + lastUpdate_ = value; + onChanged(); + return this; + } + /** + * optional uint64 last_update = 3; + */ + public Builder clearLastUpdate() { + bitField0_ = (bitField0_ & ~0x00000004); + lastUpdate_ = 0L; + onChanged(); + return this; + } + + // optional bytes result = 4; + private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes result = 4; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes result = 4; + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + /** + * optional bytes result = 4; + */ + public Builder setResult(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + result_ = value; + onChanged(); + return this; + } + /** + * optional bytes result = 4; + */ + public Builder clearResult() { + bitField0_ = (bitField0_ & ~0x00000008); + result_ = getDefaultInstance().getResult(); + onChanged(); + return this; + } + + // optional .ForeignExceptionMessage exception = 5; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_; + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public boolean hasException() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + if (exceptionBuilder_ == null) { + return exception_; + } else { + return exceptionBuilder_.getMessage(); + } + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + exception_ = value; + onChanged(); + } else { + exceptionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public Builder setException( + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) { + if (exceptionBuilder_ == null) { + exception_ = builderForValue.build(); + onChanged(); + } else { + exceptionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + exception_ != 
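// ---------------------------------------------------------------------------
// Illustrative server-side sketch, not part of the patch: the exception
// field uses a lazily created SingleFieldBuilder, so a failed procedure can
// be reported via getExceptionBuilder() without building the nested message
// separately. The "source" field name is an assumption about
// ErrorHandling.proto's ForeignExceptionMessage.
//
//   GetProcedureResultResponse.Builder rb =
//       GetProcedureResultResponse.newBuilder()
//           .setState(GetProcedureResultResponse.State.FINISHED);
//   rb.getExceptionBuilder().setSource("master");  // assumed field name
//   GetProcedureResultResponse resp = rb.build();
// ---------------------------------------------------------------------------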
org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) { + exception_ = + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial(); + } else { + exception_ = value; + } + onChanged(); + } else { + exceptionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public Builder clearException() { + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + onChanged(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getExceptionFieldBuilder().getBuilder(); + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + if (exceptionBuilder_ != null) { + return exceptionBuilder_.getMessageOrBuilder(); + } else { + return exception_; + } + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> + getExceptionFieldBuilder() { + if (exceptionBuilder_ == null) { + exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>( + exception_, + getParentForChildren(), + isClean()); + exception_ = null; + } + return exceptionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetProcedureResultResponse) + } + + static { + defaultInstance = new GetProcedureResultResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetProcedureResultResponse) + } + public interface SetQuotaRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -46201,6 +47851,14 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc getProcedureResult(.GetProcedureResultRequest) returns (.GetProcedureResultResponse); + */ + public abstract void getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -46582,6 +48240,14 @@ public final class MasterProtos { impl.getLastMajorCompactionTimestampForRegion(controller, request, done); } + @java.lang.Override + public void getProcedureResult( + com.google.protobuf.RpcController controller, + 
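// ---------------------------------------------------------------------------
// Illustrative client-side use of the new rpc, simplified; the real client
// logic in this patch lives in HBaseAdmin's future handling. "master" (a
// MasterService.BlockingInterface) and "pollIntervalMs" are hypothetical
// stand-ins.
//
//   GetProcedureResultRequest req =
//       GetProcedureResultRequest.newBuilder().setProcId(procId).build();
//   GetProcedureResultResponse res = master.getProcedureResult(null, req);
//   while (res.getState() == GetProcedureResultResponse.State.RUNNING) {
//     Thread.sleep(pollIntervalMs);
//     res = master.getProcedureResult(null, req);
//   }
//   if (res.hasException()) {
//     // ForeignExceptionUtil (imported by this patch in HBaseAdmin) can
//     // rebuild an IOException from the ForeignExceptionMessage carried in
//     // the response.
//     throw ForeignExceptionUtil.toIOException(res.getException());
//   }
// ---------------------------------------------------------------------------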
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + com.google.protobuf.RpcCallback done) { + impl.getProcedureResult(controller, request, done); + } + }; } @@ -46698,6 +48364,8 @@ public final class MasterProtos { return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); case 46: return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); + case 47: + return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -46806,6 +48474,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); case 46: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -46914,6 +48584,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 46: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -47513,6 +49185,14 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc getProcedureResult(.GetProcedureResultRequest) returns (.GetProcedureResultResponse); + */ + public abstract void getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -47770,6 +49450,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 47: + this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -47878,6 +49563,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); case 46: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -47986,6 +49673,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 46: return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -48711,6 +50400,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance())); } + + public void getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(47), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -48953,6 +50657,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -49525,6 +51234,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(47), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:MasterService) @@ -49950,6 +51671,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_IsProcedureDoneResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetProcedureResultRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetProcedureResultRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetProcedureResultResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetProcedureResultResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor 
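// ---------------------------------------------------------------------------
// For reference, not part of the patch: the serialized descriptor strings
// below encode the Master.proto changes. Reconstructed (abridged) .proto
// text, as implied by the descriptor data and the generated code above:
//
//   import "ErrorHandling.proto";   // for ForeignExceptionMessage
//
//   message CreateTableResponse  { optional uint64 proc_id = 1; }
//   message DeleteTableResponse  { optional uint64 proc_id = 1; }
//
//   message GetProcedureResultRequest { required uint64 proc_id = 1; }
//   message GetProcedureResultResponse {
//     enum State { NOT_FOUND = 0; RUNNING = 1; FINISHED = 2; }
//     required State state = 1;
//     optional uint64 start_time = 2;
//     optional uint64 last_update = 3;
//     optional bytes result = 4;
//     optional ForeignExceptionMessage exception = 5;
//   }
//
//   rpc getProcedureResult(GetProcedureResultRequest)
//       returns (GetProcedureResultResponse);
// ---------------------------------------------------------------------------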
internal_static_SetQuotaRequest_descriptor; private static @@ -49985,218 +51716,228 @@ public final class MasterProtos { static { java.lang.String[] descriptorData = { "\n\014Master.proto\032\013HBase.proto\032\014Client.prot" + - "o\032\023ClusterStatus.proto\032\013Quota.proto\"`\n\020A" + - "ddColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Ta" + - "bleName\022,\n\017column_families\030\002 \002(\0132\023.Colum" + - "nFamilySchema\"\023\n\021AddColumnResponse\"J\n\023De" + - "leteColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n." + - "TableName\022\023\n\013column_name\030\002 \002(\014\"\026\n\024Delete" + - "ColumnResponse\"c\n\023ModifyColumnRequest\022\036\n" + - "\ntable_name\030\001 \002(\0132\n.TableName\022,\n\017column_" + - "families\030\002 \002(\0132\023.ColumnFamilySchema\"\026\n\024M", - "odifyColumnResponse\"\\\n\021MoveRegionRequest" + - "\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022%\n\020de" + - "st_server_name\030\002 \001(\0132\013.ServerName\"\024\n\022Mov" + - "eRegionResponse\"\200\001\n\035DispatchMergingRegio" + - "nsRequest\022\"\n\010region_a\030\001 \002(\0132\020.RegionSpec" + - "ifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpecifie" + - "r\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036DispatchMe" + - "rgingRegionsResponse\"7\n\023AssignRegionRequ" + - "est\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"\026\n" + - "\024AssignRegionResponse\"O\n\025UnassignRegionR", - "equest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" + - "\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRegion" + - "Response\"8\n\024OfflineRegionRequest\022 \n\006regi" + - "on\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025OfflineReg" + - "ionResponse\"L\n\022CreateTableRequest\022\"\n\014tab" + - "le_schema\030\001 \002(\0132\014.TableSchema\022\022\n\nsplit_k" + - "eys\030\002 \003(\014\"\025\n\023CreateTableResponse\"4\n\022Dele" + - "teTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Tab" + - "leName\"\025\n\023DeleteTableResponse\"T\n\024Truncat" + - "eTableRequest\022\035\n\ttableName\030\001 \002(\0132\n.Table", - "Name\022\035\n\016preserveSplits\030\002 \001(\010:\005false\"\027\n\025T" + - "runcateTableResponse\"4\n\022EnableTableReque" + - "st\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\025\n\023En" + - "ableTableResponse\"5\n\023DisableTableRequest" + - "\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\026\n\024Disa" + - "bleTableResponse\"X\n\022ModifyTableRequest\022\036" + - "\n\ntable_name\030\001 \002(\0132\n.TableName\022\"\n\014table_" + - "schema\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTabl" + - "eResponse\"K\n\026CreateNamespaceRequest\0221\n\023n" + - "amespaceDescriptor\030\001 \002(\0132\024.NamespaceDesc", - "riptor\"\031\n\027CreateNamespaceResponse\"/\n\026Del" + - "eteNamespaceRequest\022\025\n\rnamespaceName\030\001 \002" + - "(\t\"\031\n\027DeleteNamespaceResponse\"K\n\026ModifyN" + - "amespaceRequest\0221\n\023namespaceDescriptor\030\001" + - " \002(\0132\024.NamespaceDescriptor\"\031\n\027ModifyName" + - "spaceResponse\"6\n\035GetNamespaceDescriptorR" + - "equest\022\025\n\rnamespaceName\030\001 \002(\t\"S\n\036GetName" + - "spaceDescriptorResponse\0221\n\023namespaceDesc" + - "riptor\030\001 \002(\0132\024.NamespaceDescriptor\"!\n\037Li" + - "stNamespaceDescriptorsRequest\"U\n ListNam", - "espaceDescriptorsResponse\0221\n\023namespaceDe" + - "scriptor\030\001 \003(\0132\024.NamespaceDescriptor\"?\n&" + - 
"ListTableDescriptorsByNamespaceRequest\022\025" + - "\n\rnamespaceName\030\001 \002(\t\"L\n\'ListTableDescri" + - "ptorsByNamespaceResponse\022!\n\013tableSchema\030" + - "\001 \003(\0132\014.TableSchema\"9\n ListTableNamesByN" + - "amespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"B" + - "\n!ListTableNamesByNamespaceResponse\022\035\n\tt" + - "ableName\030\001 \003(\0132\n.TableName\"\021\n\017ShutdownRe" + - "quest\"\022\n\020ShutdownResponse\"\023\n\021StopMasterR", - "equest\"\024\n\022StopMasterResponse\"\020\n\016BalanceR" + - "equest\"\'\n\017BalanceResponse\022\024\n\014balancer_ra" + - "n\030\001 \002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002" + - "on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBala" + - "ncerRunningResponse\022\032\n\022prev_balance_valu" + - "e\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031I" + - "sBalancerEnabledResponse\022\017\n\007enabled\030\001 \002(" + - "\010\"\027\n\025RunCatalogScanRequest\"-\n\026RunCatalog" + - "ScanResponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033Ena" + - "bleCatalogJanitorRequest\022\016\n\006enable\030\001 \002(\010", - "\"2\n\034EnableCatalogJanitorResponse\022\022\n\nprev" + - "_value\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledR" + - "equest\"0\n\037IsCatalogJanitorEnabledRespons" + - "e\022\r\n\005value\030\001 \002(\010\"9\n\017SnapshotRequest\022&\n\010s" + - "napshot\030\001 \002(\0132\024.SnapshotDescription\",\n\020S" + - "napshotResponse\022\030\n\020expected_timeout\030\001 \002(" + - "\003\"\036\n\034GetCompletedSnapshotsRequest\"H\n\035Get" + - "CompletedSnapshotsResponse\022\'\n\tsnapshots\030" + - "\001 \003(\0132\024.SnapshotDescription\"?\n\025DeleteSna" + - "pshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snapsho", - "tDescription\"\030\n\026DeleteSnapshotResponse\"@" + - "\n\026RestoreSnapshotRequest\022&\n\010snapshot\030\001 \002" + - "(\0132\024.SnapshotDescription\"\031\n\027RestoreSnaps" + - "hotResponse\"?\n\025IsSnapshotDoneRequest\022&\n\010" + - "snapshot\030\001 \001(\0132\024.SnapshotDescription\"U\n\026" + - "IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fa" + - "lse\022&\n\010snapshot\030\002 \001(\0132\024.SnapshotDescript" + - "ion\"F\n\034IsRestoreSnapshotDoneRequest\022&\n\010s" + - "napshot\030\001 \001(\0132\024.SnapshotDescription\"4\n\035I" + - "sRestoreSnapshotDoneResponse\022\023\n\004done\030\001 \001", - "(\010:\005false\"=\n\033GetSchemaAlterStatusRequest" + - "\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"T\n\034GetS" + - "chemaAlterStatusResponse\022\035\n\025yet_to_updat" + - "e_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\202" + - "\001\n\032GetTableDescriptorsRequest\022\037\n\013table_n" + - "ames\030\001 \003(\0132\n.TableName\022\r\n\005regex\030\002 \001(\t\022!\n" + - "\022include_sys_tables\030\003 \001(\010:\005false\022\021\n\tname" + - "space\030\004 \001(\t\"A\n\033GetTableDescriptorsRespon" + - "se\022\"\n\014table_schema\030\001 \003(\0132\014.TableSchema\"[" + - "\n\024GetTableNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n", - "\022include_sys_tables\030\002 \001(\010:\005false\022\021\n\tname" + - "space\030\003 \001(\t\"8\n\025GetTableNamesResponse\022\037\n\013" + - "table_names\030\001 \003(\0132\n.TableName\"\031\n\027GetClus" + - "terStatusRequest\"B\n\030GetClusterStatusResp" + - "onse\022&\n\016cluster_status\030\001 \002(\0132\016.ClusterSt" + - "atus\"\030\n\026IsMasterRunningRequest\"4\n\027IsMast" + - 
"erRunningResponse\022\031\n\021is_master_running\030\001" + - " \002(\010\"@\n\024ExecProcedureRequest\022(\n\tprocedur" + - "e\030\001 \002(\0132\025.ProcedureDescription\"F\n\025ExecPr" + - "ocedureResponse\022\030\n\020expected_timeout\030\001 \001(", - "\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsProcedureDon" + - "eRequest\022(\n\tprocedure\030\001 \001(\0132\025.ProcedureD" + - "escription\"W\n\027IsProcedureDoneResponse\022\023\n" + - "\004done\030\001 \001(\010:\005false\022\'\n\010snapshot\030\002 \001(\0132\025.P" + - "rocedureDescription\"\273\001\n\017SetQuotaRequest\022" + - "\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021" + - "\n\tnamespace\030\003 \001(\t\022\036\n\ntable_name\030\004 \001(\0132\n." + - "TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_" + - "globals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020.Thrott" + - "leRequest\"\022\n\020SetQuotaResponse\"A\n\037MajorCo", - "mpactionTimestampRequest\022\036\n\ntable_name\030\001" + - " \002(\0132\n.TableName\"L\n(MajorCompactionTimes" + - "tampForRegionRequest\022 \n\006region\030\001 \002(\0132\020.R" + - "egionSpecifier\"@\n MajorCompactionTimesta" + - "mpResponse\022\034\n\024compaction_timestamp\030\001 \002(\003" + - "2\324\032\n\rMasterService\022S\n\024GetSchemaAlterStat" + - "us\022\034.GetSchemaAlterStatusRequest\032\035.GetSc" + - "hemaAlterStatusResponse\022P\n\023GetTableDescr" + - "iptors\022\033.GetTableDescriptorsRequest\032\034.Ge" + - "tTableDescriptorsResponse\022>\n\rGetTableNam", - "es\022\025.GetTableNamesRequest\032\026.GetTableName" + - "sResponse\022G\n\020GetClusterStatus\022\030.GetClust" + - "erStatusRequest\032\031.GetClusterStatusRespon" + - "se\022D\n\017IsMasterRunning\022\027.IsMasterRunningR" + - "equest\032\030.IsMasterRunningResponse\0222\n\tAddC" + - "olumn\022\021.AddColumnRequest\032\022.AddColumnResp" + - "onse\022;\n\014DeleteColumn\022\024.DeleteColumnReque" + - "st\032\025.DeleteColumnResponse\022;\n\014ModifyColum" + - "n\022\024.ModifyColumnRequest\032\025.ModifyColumnRe" + - "sponse\0225\n\nMoveRegion\022\022.MoveRegionRequest", - "\032\023.MoveRegionResponse\022Y\n\026DispatchMerging" + - "Regions\022\036.DispatchMergingRegionsRequest\032" + - "\037.DispatchMergingRegionsResponse\022;\n\014Assi" + - "gnRegion\022\024.AssignRegionRequest\032\025.AssignR" + - "egionResponse\022A\n\016UnassignRegion\022\026.Unassi" + - "gnRegionRequest\032\027.UnassignRegionResponse" + - "\022>\n\rOfflineRegion\022\025.OfflineRegionRequest" + - "\032\026.OfflineRegionResponse\0228\n\013DeleteTable\022" + - "\023.DeleteTableRequest\032\024.DeleteTableRespon" + - "se\022>\n\rtruncateTable\022\025.TruncateTableReque", - "st\032\026.TruncateTableResponse\0228\n\013EnableTabl" + - "e\022\023.EnableTableRequest\032\024.EnableTableResp" + - "onse\022;\n\014DisableTable\022\024.DisableTableReque" + - "st\032\025.DisableTableResponse\0228\n\013ModifyTable" + - "\022\023.ModifyTableRequest\032\024.ModifyTableRespo" + - "nse\0228\n\013CreateTable\022\023.CreateTableRequest\032" + - "\024.CreateTableResponse\022/\n\010Shutdown\022\020.Shut" + - "downRequest\032\021.ShutdownResponse\0225\n\nStopMa" + - "ster\022\022.StopMasterRequest\032\023.StopMasterRes" + - "ponse\022,\n\007Balance\022\017.BalanceRequest\032\020.Bala", - "nceResponse\022M\n\022SetBalancerRunning\022\032.SetB" + - "alancerRunningRequest\032\033.SetBalancerRunni" + - "ngResponse\022J\n\021IsBalancerEnabled\022\031.IsBala" + - 
"ncerEnabledRequest\032\032.IsBalancerEnabledRe" + - "sponse\022A\n\016RunCatalogScan\022\026.RunCatalogSca" + - "nRequest\032\027.RunCatalogScanResponse\022S\n\024Ena" + - "bleCatalogJanitor\022\034.EnableCatalogJanitor" + - "Request\032\035.EnableCatalogJanitorResponse\022\\" + - "\n\027IsCatalogJanitorEnabled\022\037.IsCatalogJan" + - "itorEnabledRequest\032 .IsCatalogJanitorEna", - "bledResponse\022L\n\021ExecMasterService\022\032.Copr" + - "ocessorServiceRequest\032\033.CoprocessorServi" + - "ceResponse\022/\n\010Snapshot\022\020.SnapshotRequest" + - "\032\021.SnapshotResponse\022V\n\025GetCompletedSnaps" + - "hots\022\035.GetCompletedSnapshotsRequest\032\036.Ge" + - "tCompletedSnapshotsResponse\022A\n\016DeleteSna" + - "pshot\022\026.DeleteSnapshotRequest\032\027.DeleteSn" + - "apshotResponse\022A\n\016IsSnapshotDone\022\026.IsSna" + - "pshotDoneRequest\032\027.IsSnapshotDoneRespons" + - "e\022D\n\017RestoreSnapshot\022\027.RestoreSnapshotRe", - "quest\032\030.RestoreSnapshotResponse\022V\n\025IsRes" + - "toreSnapshotDone\022\035.IsRestoreSnapshotDone" + - "Request\032\036.IsRestoreSnapshotDoneResponse\022" + - ">\n\rExecProcedure\022\025.ExecProcedureRequest\032" + - "\026.ExecProcedureResponse\022E\n\024ExecProcedure" + - "WithRet\022\025.ExecProcedureRequest\032\026.ExecPro" + - "cedureResponse\022D\n\017IsProcedureDone\022\027.IsPr" + - "ocedureDoneRequest\032\030.IsProcedureDoneResp" + - "onse\022D\n\017ModifyNamespace\022\027.ModifyNamespac" + - "eRequest\032\030.ModifyNamespaceResponse\022D\n\017Cr", - "eateNamespace\022\027.CreateNamespaceRequest\032\030" + - ".CreateNamespaceResponse\022D\n\017DeleteNamesp" + - "ace\022\027.DeleteNamespaceRequest\032\030.DeleteNam" + - "espaceResponse\022Y\n\026GetNamespaceDescriptor" + - "\022\036.GetNamespaceDescriptorRequest\032\037.GetNa" + - "mespaceDescriptorResponse\022_\n\030ListNamespa" + - "ceDescriptors\022 .ListNamespaceDescriptors" + - "Request\032!.ListNamespaceDescriptorsRespon" + - "se\022t\n\037ListTableDescriptorsByNamespace\022\'." + - "ListTableDescriptorsByNamespaceRequest\032(", - ".ListTableDescriptorsByNamespaceResponse" + - "\022b\n\031ListTableNamesByNamespace\022!.ListTabl" + - "eNamesByNamespaceRequest\032\".ListTableName" + - "sByNamespaceResponse\022/\n\010SetQuota\022\020.SetQu" + - "otaRequest\032\021.SetQuotaResponse\022f\n\037getLast" + - "MajorCompactionTimestamp\022 .MajorCompacti" + - "onTimestampRequest\032!.MajorCompactionTime" + - "stampResponse\022x\n(getLastMajorCompactionT" + - "imestampForRegion\022).MajorCompactionTimes" + - "tampForRegionRequest\032!.MajorCompactionTi", - "mestampResponseBB\n*org.apache.hadoop.hba" + - "se.protobuf.generatedB\014MasterProtosH\001\210\001\001" + - "\240\001\001" + "o\032\023ClusterStatus.proto\032\023ErrorHandling.pr" + + "oto\032\013Quota.proto\"`\n\020AddColumnRequest\022\036\n\n" + + "table_name\030\001 \002(\0132\n.TableName\022,\n\017column_f" + + "amilies\030\002 \002(\0132\023.ColumnFamilySchema\"\023\n\021Ad" + + "dColumnResponse\"J\n\023DeleteColumnRequest\022\036" + + "\n\ntable_name\030\001 \002(\0132\n.TableName\022\023\n\013column" + + "_name\030\002 \002(\014\"\026\n\024DeleteColumnResponse\"c\n\023M" + + "odifyColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n" + + ".TableName\022,\n\017column_families\030\002 \002(\0132\023.Co", + "lumnFamilySchema\"\026\n\024ModifyColumnResponse" + + "\"\\\n\021MoveRegionRequest\022 \n\006region\030\001 \002(\0132\020." 
+ + "RegionSpecifier\022%\n\020dest_server_name\030\002 \001(" + + "\0132\013.ServerName\"\024\n\022MoveRegionResponse\"\200\001\n" + + "\035DispatchMergingRegionsRequest\022\"\n\010region" + + "_a\030\001 \002(\0132\020.RegionSpecifier\022\"\n\010region_b\030\002" + + " \002(\0132\020.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010" + + ":\005false\" \n\036DispatchMergingRegionsRespons" + + "e\"7\n\023AssignRegionRequest\022 \n\006region\030\001 \002(\013" + + "2\020.RegionSpecifier\"\026\n\024AssignRegionRespon", + "se\"O\n\025UnassignRegionRequest\022 \n\006region\030\001 " + + "\002(\0132\020.RegionSpecifier\022\024\n\005force\030\002 \001(\010:\005fa" + + "lse\"\030\n\026UnassignRegionResponse\"8\n\024Offline" + + "RegionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSp" + + "ecifier\"\027\n\025OfflineRegionResponse\"L\n\022Crea" + + "teTableRequest\022\"\n\014table_schema\030\001 \002(\0132\014.T" + + "ableSchema\022\022\n\nsplit_keys\030\002 \003(\014\"&\n\023Create" + + "TableResponse\022\017\n\007proc_id\030\001 \001(\004\"4\n\022Delete" + + "TableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Table" + + "Name\"&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001", + " \001(\004\"T\n\024TruncateTableRequest\022\035\n\ttableNam" + + "e\030\001 \002(\0132\n.TableName\022\035\n\016preserveSplits\030\002 " + + "\001(\010:\005false\"\027\n\025TruncateTableResponse\"4\n\022E" + + "nableTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n." + + "TableName\"\025\n\023EnableTableResponse\"5\n\023Disa" + + "bleTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Ta" + + "bleName\"\026\n\024DisableTableResponse\"X\n\022Modif" + + "yTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Tabl" + + "eName\022\"\n\014table_schema\030\002 \002(\0132\014.TableSchem" + + "a\"\025\n\023ModifyTableResponse\"K\n\026CreateNamesp", + "aceRequest\0221\n\023namespaceDescriptor\030\001 \002(\0132" + + "\024.NamespaceDescriptor\"\031\n\027CreateNamespace" + + "Response\"/\n\026DeleteNamespaceRequest\022\025\n\rna" + + "mespaceName\030\001 \002(\t\"\031\n\027DeleteNamespaceResp" + + "onse\"K\n\026ModifyNamespaceRequest\0221\n\023namesp" + + "aceDescriptor\030\001 \002(\0132\024.NamespaceDescripto" + + "r\"\031\n\027ModifyNamespaceResponse\"6\n\035GetNames" + + "paceDescriptorRequest\022\025\n\rnamespaceName\030\001" + + " \002(\t\"S\n\036GetNamespaceDescriptorResponse\0221" + + "\n\023namespaceDescriptor\030\001 \002(\0132\024.NamespaceD", + "escriptor\"!\n\037ListNamespaceDescriptorsReq" + + "uest\"U\n ListNamespaceDescriptorsResponse" + + "\0221\n\023namespaceDescriptor\030\001 \003(\0132\024.Namespac" + + "eDescriptor\"?\n&ListTableDescriptorsByNam" + + "espaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"L\n\'" + + "ListTableDescriptorsByNamespaceResponse\022" + + "!\n\013tableSchema\030\001 \003(\0132\014.TableSchema\"9\n Li" + + "stTableNamesByNamespaceRequest\022\025\n\rnamesp" + + "aceName\030\001 \002(\t\"B\n!ListTableNamesByNamespa" + + "ceResponse\022\035\n\ttableName\030\001 \003(\0132\n.TableNam", + "e\"\021\n\017ShutdownRequest\"\022\n\020ShutdownResponse" + + "\"\023\n\021StopMasterRequest\"\024\n\022StopMasterRespo" + + "nse\"\020\n\016BalanceRequest\"\'\n\017BalanceResponse" + + "\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRun" + + "ningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002" + + " \001(\010\"8\n\032SetBalancerRunningResponse\022\032\n\022pr" + + "ev_balance_value\030\001 
\001(\010\"\032\n\030IsBalancerEnab" + + "ledRequest\",\n\031IsBalancerEnabledResponse\022" + + "\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogScanReques" + + "t\"-\n\026RunCatalogScanResponse\022\023\n\013scan_resu", + "lt\030\001 \001(\005\"-\n\033EnableCatalogJanitorRequest\022" + + "\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatalogJanitorR" + + "esponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalog" + + "JanitorEnabledRequest\"0\n\037IsCatalogJanito" + + "rEnabledResponse\022\r\n\005value\030\001 \002(\010\"9\n\017Snaps" + + "hotRequest\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotD" + + "escription\",\n\020SnapshotResponse\022\030\n\020expect" + + "ed_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapshot" + + "sRequest\"H\n\035GetCompletedSnapshotsRespons" + + "e\022\'\n\tsnapshots\030\001 \003(\0132\024.SnapshotDescripti", + "on\"?\n\025DeleteSnapshotRequest\022&\n\010snapshot\030" + + "\001 \002(\0132\024.SnapshotDescription\"\030\n\026DeleteSna" + + "pshotResponse\"@\n\026RestoreSnapshotRequest\022" + + "&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescription\"" + + "\031\n\027RestoreSnapshotResponse\"?\n\025IsSnapshot" + + "DoneRequest\022&\n\010snapshot\030\001 \001(\0132\024.Snapshot" + + "Description\"U\n\026IsSnapshotDoneResponse\022\023\n" + + "\004done\030\001 \001(\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.S" + + "napshotDescription\"F\n\034IsRestoreSnapshotD" + + "oneRequest\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotD", + "escription\"4\n\035IsRestoreSnapshotDoneRespo" + + "nse\022\023\n\004done\030\001 \001(\010:\005false\"=\n\033GetSchemaAlt" + + "erStatusRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Ta" + + "bleName\"T\n\034GetSchemaAlterStatusResponse\022" + + "\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtotal_" + + "regions\030\002 \001(\r\"\202\001\n\032GetTableDescriptorsReq" + + "uest\022\037\n\013table_names\030\001 \003(\0132\n.TableName\022\r\n" + + "\005regex\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010" + + ":\005false\022\021\n\tnamespace\030\004 \001(\t\"A\n\033GetTableDe" + + "scriptorsResponse\022\"\n\014table_schema\030\001 \003(\0132", + "\014.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" + + "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" + + ":\005false\022\021\n\tnamespace\030\003 \001(\t\"8\n\025GetTableNa" + + "mesResponse\022\037\n\013table_names\030\001 \003(\0132\n.Table" + + "Name\"\031\n\027GetClusterStatusRequest\"B\n\030GetCl" + + "usterStatusResponse\022&\n\016cluster_status\030\001 " + + "\002(\0132\016.ClusterStatus\"\030\n\026IsMasterRunningRe" + + "quest\"4\n\027IsMasterRunningResponse\022\031\n\021is_m" + + "aster_running\030\001 \002(\010\"@\n\024ExecProcedureRequ" + + "est\022(\n\tprocedure\030\001 \002(\0132\025.ProcedureDescri", + "ption\"F\n\025ExecProcedureResponse\022\030\n\020expect" + + "ed_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n" + + "\026IsProcedureDoneRequest\022(\n\tprocedure\030\001 \001" + + "(\0132\025.ProcedureDescription\"W\n\027IsProcedure" + + "DoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010sna" + + "pshot\030\002 \001(\0132\025.ProcedureDescription\",\n\031Ge" + + "tProcedureResultRequest\022\017\n\007proc_id\030\001 \002(\004" + + "\"\347\001\n\032GetProcedureResultResponse\0220\n\005state" + + "\030\001 \002(\0162!.GetProcedureResultResponse.Stat" + + "e\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_update\030\003 
\001", + "(\004\022\016\n\006result\030\004 \001(\014\022+\n\texception\030\005 \001(\0132\030." + + "ForeignExceptionMessage\"1\n\005State\022\r\n\tNOT_" + + "FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"\273\001\n\017S" + + "etQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuse" + + "r_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\036\n\ntabl" + + "e_name\030\004 \001(\0132\n.TableName\022\022\n\nremove_all\030\005" + + " \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022\"\n\010throttle" + + "\030\007 \001(\0132\020.ThrottleRequest\"\022\n\020SetQuotaResp" + + "onse\"A\n\037MajorCompactionTimestampRequest\022" + + "\036\n\ntable_name\030\001 \002(\0132\n.TableName\"L\n(Major", + "CompactionTimestampForRegionRequest\022 \n\006r" + + "egion\030\001 \002(\0132\020.RegionSpecifier\"@\n MajorCo" + + "mpactionTimestampResponse\022\034\n\024compaction_" + + "timestamp\030\001 \002(\0032\243\033\n\rMasterService\022S\n\024Get" + + "SchemaAlterStatus\022\034.GetSchemaAlterStatus" + + "Request\032\035.GetSchemaAlterStatusResponse\022P" + + "\n\023GetTableDescriptors\022\033.GetTableDescript" + + "orsRequest\032\034.GetTableDescriptorsResponse" + + "\022>\n\rGetTableNames\022\025.GetTableNamesRequest" + + "\032\026.GetTableNamesResponse\022G\n\020GetClusterSt", + "atus\022\030.GetClusterStatusRequest\032\031.GetClus" + + "terStatusResponse\022D\n\017IsMasterRunning\022\027.I" + + "sMasterRunningRequest\032\030.IsMasterRunningR" + + "esponse\0222\n\tAddColumn\022\021.AddColumnRequest\032" + + "\022.AddColumnResponse\022;\n\014DeleteColumn\022\024.De" + + "leteColumnRequest\032\025.DeleteColumnResponse" + + "\022;\n\014ModifyColumn\022\024.ModifyColumnRequest\032\025" + + ".ModifyColumnResponse\0225\n\nMoveRegion\022\022.Mo" + + "veRegionRequest\032\023.MoveRegionResponse\022Y\n\026" + + "DispatchMergingRegions\022\036.DispatchMerging", + "RegionsRequest\032\037.DispatchMergingRegionsR" + + "esponse\022;\n\014AssignRegion\022\024.AssignRegionRe" + + "quest\032\025.AssignRegionResponse\022A\n\016Unassign" + + "Region\022\026.UnassignRegionRequest\032\027.Unassig" + + "nRegionResponse\022>\n\rOfflineRegion\022\025.Offli" + + "neRegionRequest\032\026.OfflineRegionResponse\022" + + "8\n\013DeleteTable\022\023.DeleteTableRequest\032\024.De" + + "leteTableResponse\022>\n\rtruncateTable\022\025.Tru" + + "ncateTableRequest\032\026.TruncateTableRespons" + + "e\0228\n\013EnableTable\022\023.EnableTableRequest\032\024.", + "EnableTableResponse\022;\n\014DisableTable\022\024.Di" + + "sableTableRequest\032\025.DisableTableResponse" + + "\0228\n\013ModifyTable\022\023.ModifyTableRequest\032\024.M" + + "odifyTableResponse\0228\n\013CreateTable\022\023.Crea" + + "teTableRequest\032\024.CreateTableResponse\022/\n\010" + + "Shutdown\022\020.ShutdownRequest\032\021.ShutdownRes" + + "ponse\0225\n\nStopMaster\022\022.StopMasterRequest\032" + + "\023.StopMasterResponse\022,\n\007Balance\022\017.Balanc" + + "eRequest\032\020.BalanceResponse\022M\n\022SetBalance" + + "rRunning\022\032.SetBalancerRunningRequest\032\033.S", + "etBalancerRunningResponse\022J\n\021IsBalancerE" + + "nabled\022\031.IsBalancerEnabledRequest\032\032.IsBa" + + "lancerEnabledResponse\022A\n\016RunCatalogScan\022" + + "\026.RunCatalogScanRequest\032\027.RunCatalogScan" + + "Response\022S\n\024EnableCatalogJanitor\022\034.Enabl" + + "eCatalogJanitorRequest\032\035.EnableCatalogJa" + + "nitorResponse\022\\\n\027IsCatalogJanitorEnabled" + + "\022\037.IsCatalogJanitorEnabledRequest\032 
.IsCa" + + "talogJanitorEnabledResponse\022L\n\021ExecMaste" + + "rService\022\032.CoprocessorServiceRequest\032\033.C", + "oprocessorServiceResponse\022/\n\010Snapshot\022\020." + + "SnapshotRequest\032\021.SnapshotResponse\022V\n\025Ge" + + "tCompletedSnapshots\022\035.GetCompletedSnapsh" + + "otsRequest\032\036.GetCompletedSnapshotsRespon" + + "se\022A\n\016DeleteSnapshot\022\026.DeleteSnapshotReq" + + "uest\032\027.DeleteSnapshotResponse\022A\n\016IsSnaps" + + "hotDone\022\026.IsSnapshotDoneRequest\032\027.IsSnap" + + "shotDoneResponse\022D\n\017RestoreSnapshot\022\027.Re" + + "storeSnapshotRequest\032\030.RestoreSnapshotRe" + + "sponse\022V\n\025IsRestoreSnapshotDone\022\035.IsRest", + "oreSnapshotDoneRequest\032\036.IsRestoreSnapsh" + + "otDoneResponse\022>\n\rExecProcedure\022\025.ExecPr" + + "ocedureRequest\032\026.ExecProcedureResponse\022E" + + "\n\024ExecProcedureWithRet\022\025.ExecProcedureRe" + + "quest\032\026.ExecProcedureResponse\022D\n\017IsProce" + + "dureDone\022\027.IsProcedureDoneRequest\032\030.IsPr" + + "ocedureDoneResponse\022D\n\017ModifyNamespace\022\027" + + ".ModifyNamespaceRequest\032\030.ModifyNamespac" + + "eResponse\022D\n\017CreateNamespace\022\027.CreateNam" + + "espaceRequest\032\030.CreateNamespaceResponse\022", + "D\n\017DeleteNamespace\022\027.DeleteNamespaceRequ" + + "est\032\030.DeleteNamespaceResponse\022Y\n\026GetName" + + "spaceDescriptor\022\036.GetNamespaceDescriptor" + + "Request\032\037.GetNamespaceDescriptorResponse" + + "\022_\n\030ListNamespaceDescriptors\022 .ListNames" + + "paceDescriptorsRequest\032!.ListNamespaceDe" + + "scriptorsResponse\022t\n\037ListTableDescriptor" + + "sByNamespace\022\'.ListTableDescriptorsByNam" + + "espaceRequest\032(.ListTableDescriptorsByNa" + + "mespaceResponse\022b\n\031ListTableNamesByNames", + "pace\022!.ListTableNamesByNamespaceRequest\032" + + "\".ListTableNamesByNamespaceResponse\022/\n\010S" + + "etQuota\022\020.SetQuotaRequest\032\021.SetQuotaResp" + + "onse\022f\n\037getLastMajorCompactionTimestamp\022" + + " .MajorCompactionTimestampRequest\032!.Majo" + + "rCompactionTimestampResponse\022x\n(getLastM" + + "ajorCompactionTimestampForRegion\022).Major" + + "CompactionTimestampForRegionRequest\032!.Ma" + + "jorCompactionTimestampResponse\022M\n\022getPro" + + "cedureResult\022\032.GetProcedureResultRequest", + "\032\033.GetProcedureResultResponseBB\n*org.apa" + + "che.hadoop.hbase.protobuf.generatedB\014Mas" + + "terProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -50310,7 +52051,7 @@ public final class MasterProtos { internal_static_CreateTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CreateTableResponse_descriptor, - new java.lang.String[] { }); + new java.lang.String[] { "ProcId", }); internal_static_DeleteTableRequest_descriptor = getDescriptor().getMessageTypes().get(18); internal_static_DeleteTableRequest_fieldAccessorTable = new @@ -50322,7 +52063,7 @@ public final class MasterProtos { internal_static_DeleteTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteTableResponse_descriptor, - new java.lang.String[] { }); + new java.lang.String[] { "ProcId", }); internal_static_TruncateTableRequest_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_TruncateTableRequest_fieldAccessorTable = new 
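The regenerated code above reflects the two protocol additions this patch makes: an optional proc_id on CreateTableResponse/DeleteTableResponse, and a getProcedureResult RPC for polling a procedure's outcome. A rough client-side sketch of how the two fit together — masterStub, timeoutMs, and pollIntervalMs are hypothetical placeholders and imports are elided; this is illustration, not code from the patch:

    // Sketch: create a table, then poll the new getProcedureResult RPC until
    // the procedure finishes or a deadline passes. Returns raw result bytes.
    private static byte[] createTableSync(
        MasterProtos.MasterService.BlockingInterface masterStub,
        CreateTableRequest createReq, long timeoutMs, long pollIntervalMs)
        throws ServiceException, IOException, InterruptedException {
      CreateTableResponse resp = masterStub.createTable(null, createReq);
      if (!resp.hasProcId()) {
        return null;  // proc_id is optional; an older master may omit it
      }
      GetProcedureResultRequest pollReq =
          GetProcedureResultRequest.newBuilder().setProcId(resp.getProcId()).build();
      long deadline = EnvironmentEdgeManager.currentTime() + timeoutMs;
      while (EnvironmentEdgeManager.currentTime() < deadline) {
        GetProcedureResultResponse res = masterStub.getProcedureResult(null, pollReq);
        switch (res.getState()) {
          case NOT_FOUND:
            throw new IOException("procId=" + resp.getProcId() + " not found");
          case FINISHED:
            if (res.hasException()) {
              // Failures travel back as a ForeignExceptionMessage.
              throw ForeignExceptionUtil.toIOException(res.getException());
            }
            return res.hasResult() ? res.getResult().toByteArray() : null;
          default:
            Thread.sleep(pollIntervalMs);  // still RUNNING; back off and retry
        }
      }
      throw new TimeoutIOException("procId=" + resp.getProcId() + " still running");
    }

The server half of this handshake is the getProcedureResult implementation added to MasterRpcServices further down.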
@@ -50707,32 +52448,44 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); - internal_static_SetQuotaRequest_descriptor = + internal_static_GetProcedureResultRequest_descriptor = getDescriptor().getMessageTypes().get(84); + internal_static_GetProcedureResultRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetProcedureResultRequest_descriptor, + new java.lang.String[] { "ProcId", }); + internal_static_GetProcedureResultResponse_descriptor = + getDescriptor().getMessageTypes().get(85); + internal_static_GetProcedureResultResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetProcedureResultResponse_descriptor, + new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); + internal_static_SetQuotaRequest_descriptor = + getDescriptor().getMessageTypes().get(86); internal_static_SetQuotaRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); internal_static_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(85); + getDescriptor().getMessageTypes().get(87); internal_static_SetQuotaResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(86); + getDescriptor().getMessageTypes().get(88); internal_static_MajorCompactionTimestampRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_MajorCompactionTimestampForRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(87); + getDescriptor().getMessageTypes().get(89); internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(88); + getDescriptor().getMessageTypes().get(90); internal_static_MajorCompactionTimestampResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MajorCompactionTimestampResponse_descriptor, @@ -50746,6 +52499,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor(), }, assigner); } diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index e2814e733f3..22ad882b3a9 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -28,6 +28,7 @@ option optimize_for = SPEED; 
import "HBase.proto"; import "Client.proto"; import "ClusterStatus.proto"; +import "ErrorHandling.proto"; import "Quota.proto"; /* Column-level protobufs */ @@ -108,6 +109,7 @@ message CreateTableRequest { } message CreateTableResponse { + optional uint64 proc_id = 1; } message DeleteTableRequest { @@ -115,6 +117,7 @@ message DeleteTableRequest { } message DeleteTableResponse { + optional uint64 proc_id = 1; } message TruncateTableRequest { @@ -372,6 +375,24 @@ message IsProcedureDoneResponse { optional ProcedureDescription snapshot = 2; } +message GetProcedureResultRequest { + required uint64 proc_id = 1; +} + +message GetProcedureResultResponse { + enum State { + NOT_FOUND = 0; + RUNNING = 1; + FINISHED = 2; + } + + required State state = 1; + optional uint64 start_time = 2; + optional uint64 last_update = 3; + optional bytes result = 4; + optional ForeignExceptionMessage exception = 5; +} + message SetQuotaRequest { optional string user_name = 1; optional string user_group = 2; @@ -622,4 +643,7 @@ service MasterService { /** Returns the timestamp of the last major compaction */ rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest) returns(MajorCompactionTimestampResponse); + + rpc getProcedureResult(GetProcedureResultRequest) + returns(GetProcedureResultResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 5c491bf27ae..df1454d2a6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1360,7 +1360,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } @Override - public void createTable(HTableDescriptor hTableDescriptor, + public long createTable(HTableDescriptor hTableDescriptor, byte [][] splitKeys) throws IOException { if (isStopped()) { throw new MasterNotRunningException(); @@ -1391,9 +1391,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { cpHost.postCreateTable(hTableDescriptor, newRegions); } - // TODO: change the interface to return the procId, - // and add it to the response protobuf. - //return procId; + return procId; } /** @@ -1604,7 +1602,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } @Override - public void deleteTable(final TableName tableName) throws IOException { + public long deleteTable(final TableName tableName) throws IOException { checkInitialized(); if (cpHost != null) { cpHost.preDeleteTable(tableName); @@ -1621,9 +1619,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { cpHost.postDeleteTable(tableName); } - // TODO: change the interface to return the procId, - // and add it to the response protobuf. 
- //return procId; + return procId; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index e3e40991e74..8cd22a91fbd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -42,6 +42,8 @@ import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.procedure.MasterProcedureManager; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; @@ -85,6 +87,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnaps import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; @@ -157,6 +161,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Pair; import org.apache.zookeeper.KeeperException; @@ -404,11 +409,11 @@ public class MasterRpcServices extends RSRpcServices HTableDescriptor hTableDescriptor = HTableDescriptor.convert(req.getTableSchema()); byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req); try { - master.createTable(hTableDescriptor, splitKeys); + long procId = master.createTable(hTableDescriptor, splitKeys); + return CreateTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); } - return CreateTableResponse.newBuilder().build(); } @Override @@ -460,11 +465,11 @@ public class MasterRpcServices extends RSRpcServices public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request) throws ServiceException { try { - master.deleteTable(ProtobufUtil.toTableName(request.getTableName())); + long procId = master.deleteTable(ProtobufUtil.toTableName(request.getTableName())); + return DeleteTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); } - return DeleteTableResponse.newBuilder().build(); } @Override @@ -943,6 +948,44 @@ public class MasterRpcServices extends RSRpcServices } } + @Override + public GetProcedureResultResponse getProcedureResult(RpcController controller, + GetProcedureResultRequest request) throws 
ServiceException { + LOG.debug("Checking to see if procedure is done procId=" + request.getProcId()); + try { + master.checkInitialized(); + GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder(); + + Pair<ProcedureResult, Procedure> v = master.getMasterProcedureExecutor() + .getResultOrProcedure(request.getProcId()); + if (v.getFirst() != null) { + ProcedureResult result = v.getFirst(); + builder.setState(GetProcedureResultResponse.State.FINISHED); + builder.setStartTime(result.getStartTime()); + builder.setLastUpdate(result.getLastUpdate()); + if (result.isFailed()) { + builder.setException(result.getException().convert()); + } + if (result.hasResultData()) { + builder.setResult(ByteStringer.wrap(result.getResult())); + } + master.getMasterProcedureExecutor().removeResult(request.getProcId()); + } else { + Procedure proc = v.getSecond(); + if (proc == null) { + builder.setState(GetProcedureResultResponse.State.NOT_FOUND); + } else { + builder.setState(GetProcedureResultResponse.State.RUNNING); + builder.setStartTime(proc.getStartTime()); + builder.setLastUpdate(proc.getLastUpdate()); + } + } + return builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c, ListNamespaceDescriptorsRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index bd4572b1c74..3166a2389fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -100,7 +100,7 @@ public interface MasterServices extends Server { * @param splitKeys Starting row keys for the initial table regions. If null * a single region is created.
*/ - void createTable(HTableDescriptor desc, byte[][] splitKeys) + long createTable(HTableDescriptor desc, byte[][] splitKeys) throws IOException; /** @@ -108,7 +108,7 @@ public interface MasterServices extends Server { * @param tableName The table name * @throws IOException */ - void deleteTable(final TableName tableName) throws IOException; + long deleteTable(final TableName tableName) throws IOException; /** * Truncate a table diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index f29b0a6eb0b..89f096985a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -125,6 +125,7 @@ public class DeleteTableProcedure LOG.debug("delete '" + getTableName() + "' from filesystem"); DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true); setNextState(DeleteTableState.DELETE_TABLE_UPDATE_DESC_CACHE); + regions = null; break; case DELETE_TABLE_UPDATE_DESC_CACHE: LOG.debug("delete '" + getTableName() + "' descriptor"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 37b20b047cd..d3c1e0ed571 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -224,9 +224,10 @@ public class TestCatalogJanitor { } @Override - public void createTable(HTableDescriptor desc, byte[][] splitKeys) + public long createTable(HTableDescriptor desc, byte[][] splitKeys) throws IOException { // no-op + return -1; } @Override @@ -408,7 +409,9 @@ public class TestCatalogJanitor { } @Override - public void deleteTable(TableName tableName) throws IOException { } + public long deleteTable(TableName tableName) throws IOException { + return -1; + } @Override public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index f9d055246ad..9c654e25ed0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -172,6 +172,7 @@ public class TestHBaseFsck { conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE); conf.setInt("hbase.hconnection.threads.core", POOL_SIZE); conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT); + conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT); TEST_UTIL.startMiniCluster(3); tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS,