diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f2fc9a58df9..3e767d223f4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -1249,6 +1250,14 @@ public interface Admin extends Abortable, Closeable {
   ProcedureInfo[] listProcedures()
       throws IOException;
 
+  /**
+   * List locks.
+   * @return lock list
+   * @throws IOException if a remote or network exception occurs
+   */
+  LockInfo[] listLocks()
+      throws IOException;
+
   /**
    * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
    *
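A minimal sketch (not part of the patch) of how a client might consume the new Admin#listLocks() API above. The connection setup and output format are illustrative, and the snippet assumes a surrounding method that throws IOException:

// Hypothetical caller of the new API; types come from org.apache.hadoop.hbase.client
// and org.apache.hadoop.hbase.procedure2.
try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
     Admin admin = connection.getAdmin()) {
  for (LockInfo lock : admin.listLocks()) {
    // Each LockInfo describes one lock queue: the resource, the lock mode,
    // the exclusive owner (if any), and any procedures still waiting.
    System.out.println(lock.getResourceType() + " " + lock.getResourceName()
        + " " + lock.getLockType() + ", "
        + lock.getWaitingProcedures().size() + " waiting procedure(s)");
  }
}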
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 99feb14db9d..6859cb306d5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -25,8 +25,6 @@ import static org.apache.hadoop.hbase.client.MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY;
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsentEx;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -120,6 +118,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.zookeeper.KeeperException;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import edu.umd.cs.findbugs.annotations.Nullable;
 
 /**
@@ -1282,6 +1282,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       return stub.listProcedures(controller, request);
     }
 
+    @Override
+    public MasterProtos.ListLocksResponse listLocks(
+        RpcController controller,
+        MasterProtos.ListLocksRequest request) throws ServiceException {
+      return stub.listLocks(controller, request);
+    }
+
     @Override
     public MasterProtos.AddColumnResponse addColumn(
         RpcController controller,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index e55a95d0304..7e79c20193e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -80,6 +79,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -110,6 +110,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
@@ -151,6 +152,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
@@ -191,7 +194,6 @@ import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -201,7 +203,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
@@ -2096,26 +2097,33 @@ public class HBaseAdmin implements Admin {
           getRpcController(), ListProceduresRequest.newBuilder().build()).getProcedureList();
         ProcedureInfo[] procInfoList = new ProcedureInfo[procList.size()];
         for (int i = 0; i < procList.size(); i++) {
-          procInfoList[i] = convert(procList.get(i));
+          procInfoList[i] = ProtobufUtil.toProcedureInfo(procList.get(i));
         }
         return procInfoList;
       }
     });
   }
 
-  private static ProcedureInfo convert(final ProcedureProtos.Procedure procProto) {
-    NonceKey nonceKey = null;
-    if (procProto.getNonce() != HConstants.NO_NONCE) {
-      nonceKey = new NonceKey(procProto.getNonceGroup(), procProto.getNonce());
-    }
-    org.apache.hadoop.hbase.ProcedureState procedureState =
-      org.apache.hadoop.hbase.ProcedureState.valueOf(procProto.getState().name());
-    return new ProcedureInfo(procProto.getProcId(), procProto.getClassName(), procProto.getOwner(),
-      procedureState, procProto.hasParentId() ? procProto.getParentId() : -1, nonceKey,
-      procProto.hasException()?
-          ForeignExceptionUtil.toIOException(procProto.getException()): null,
-      procProto.getLastUpdate(), procProto.getSubmittedTime(),
-      procProto.hasResult()? procProto.getResult().toByteArray() : null);
+  @Override
+  public LockInfo[] listLocks() throws IOException {
+    return executeCallable(new MasterCallable<LockInfo[]>(getConnection(),
+        getRpcControllerFactory()) {
+      @Override
+      protected LockInfo[] rpcCall() throws Exception {
+        ListLocksRequest request = ListLocksRequest.newBuilder().build();
+        ListLocksResponse response = master.listLocks(getRpcController(), request);
+        List<LockServiceProtos.LockInfo> locksProto = response.getLockList();
+
+        LockInfo[] locks = new LockInfo[locksProto.size()];
+
+        for (int i = 0; i < locks.length; i++) {
+          LockServiceProtos.LockInfo lockProto = locksProto.get(i);
+          locks[i] = ProtobufUtil.toLockInfo(lockProto);
+        }
+
+        return locks;
+      }
+    });
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
index 72b2a153a06..e3b5b12e076 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
@@ -190,6 +190,12 @@ public class ShortCircuitMasterConnection implements MasterKeepAliveConnection {
     return stub.listProcedures(controller, request);
   }
 
+  @Override
+  public ListLocksResponse listLocks(RpcController controller,
+      ListLocksRequest request) throws ServiceException {
+    return stub.listLocks(controller, request);
+  }
+
   @Override
   public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller,
       ListNamespaceDescriptorsRequest request) throws ServiceException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index e969ded5450..04ce040af5a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -53,6 +53,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ProcedureState;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -82,6 +84,7 @@ import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.LimitInputStream;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
 import org.apache.hadoop.hbase.quotas.QuotaType;
@@ -145,11 +148,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
@@ -166,7 +172,9 @@ import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.DynamicClassLoader;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.Methods;
+import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.ipc.RemoteException;
 
@@ -3262,4 +3270,177 @@ public final class ProtobufUtil {
     int port = Addressing.parsePort(str);
     return ServerName.valueOf(hostname, port, -1L);
   }
-}
\ No newline at end of file
+
+  /**
+   * @return Convert the current {@link ProcedureInfo} into a Protocol Buffers Procedure
+   * instance.
+   */
+  public static ProcedureProtos.Procedure toProtoProcedure(ProcedureInfo procedure) {
+    ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder();
+
+    builder.setClassName(procedure.getProcName());
+    builder.setProcId(procedure.getProcId());
+    builder.setSubmittedTime(procedure.getSubmittedTime());
+    builder.setState(ProcedureProtos.ProcedureState.valueOf(procedure.getProcState().name()));
+    builder.setLastUpdate(procedure.getLastUpdate());
+
+    if (procedure.hasParentId()) {
+      builder.setParentId(procedure.getParentId());
+    }
+
+    if (procedure.hasOwner()) {
+      builder.setOwner(procedure.getProcOwner());
+    }
+
+    if (procedure.isFailed()) {
+      builder.setException(ForeignExceptionUtil.toProtoForeignException(procedure.getException()));
+    }
+
+    if (procedure.hasResultData()) {
+      builder.setResult(UnsafeByteOperations.unsafeWrap(procedure.getResult()));
+    }
+
+    return builder.build();
+  }
+
+  /**
+   * Helper to convert the protobuf object.
+   * @return Convert the current Protocol Buffers Procedure to {@link ProcedureInfo}
+   * instance.
+   */
+  public static ProcedureInfo toProcedureInfo(ProcedureProtos.Procedure procedureProto) {
+    NonceKey nonceKey = null;
+
+    if (procedureProto.getNonce() != HConstants.NO_NONCE) {
+      nonceKey = new NonceKey(procedureProto.getNonceGroup(), procedureProto.getNonce());
+    }
+
+    return new ProcedureInfo(procedureProto.getProcId(), procedureProto.getClassName(),
+        procedureProto.hasOwner() ? procedureProto.getOwner() : null,
+        ProcedureState.valueOf(procedureProto.getState().name()),
+        procedureProto.hasParentId() ? procedureProto.getParentId() : -1, nonceKey,
+        procedureProto.hasException() ?
+            ForeignExceptionUtil.toIOException(procedureProto.getException()) : null,
+        procedureProto.getLastUpdate(), procedureProto.getSubmittedTime(),
+        procedureProto.hasResult() ? procedureProto.getResult().toByteArray() : null);
+  }
+
+  public static LockServiceProtos.ResourceType toProtoResourceType(
+      LockInfo.ResourceType resourceType) {
+    switch (resourceType) {
+    case SERVER:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_SERVER;
+    case NAMESPACE:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_NAMESPACE;
+    case TABLE:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_TABLE;
+    case REGION:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_REGION;
+    default:
+      throw new IllegalArgumentException("Unknown resource type: " + resourceType);
+    }
+  }
+
+  public static LockInfo.ResourceType toResourceType(
+      LockServiceProtos.ResourceType resourceTypeProto) {
+    switch (resourceTypeProto) {
+    case RESOURCE_TYPE_SERVER:
+      return LockInfo.ResourceType.SERVER;
+    case RESOURCE_TYPE_NAMESPACE:
+      return LockInfo.ResourceType.NAMESPACE;
+    case RESOURCE_TYPE_TABLE:
+      return LockInfo.ResourceType.TABLE;
+    case RESOURCE_TYPE_REGION:
+      return LockInfo.ResourceType.REGION;
+    default:
+      throw new IllegalArgumentException("Unknown resource type: " + resourceTypeProto);
+    }
+  }
+
+  public static LockServiceProtos.LockType toProtoLockType(
+      LockInfo.LockType lockType) {
+    return LockServiceProtos.LockType.valueOf(lockType.name());
+  }
+
+  public static LockInfo.LockType toLockType(
+      LockServiceProtos.LockType lockTypeProto) {
+    return LockInfo.LockType.valueOf(lockTypeProto.name());
+  }
+
+  public static LockServiceProtos.WaitingProcedure toProtoWaitingProcedure(
+      LockInfo.WaitingProcedure waitingProcedure) {
+    LockServiceProtos.WaitingProcedure.Builder builder = LockServiceProtos.WaitingProcedure.newBuilder();
+
+    ProcedureProtos.Procedure procedureProto =
+        toProtoProcedure(waitingProcedure.getProcedure());
+
+    builder
+        .setLockType(toProtoLockType(waitingProcedure.getLockType()))
+        .setProcedure(procedureProto);
+
+    return builder.build();
+  }
+
+  public static LockInfo.WaitingProcedure toWaitingProcedure(
+      LockServiceProtos.WaitingProcedure waitingProcedureProto) {
+    LockInfo.WaitingProcedure waiting = new LockInfo.WaitingProcedure();
+
+    waiting.setLockType(toLockType(waitingProcedureProto.getLockType()));
+
+    ProcedureInfo procedure =
+        toProcedureInfo(waitingProcedureProto.getProcedure());
+    waiting.setProcedure(procedure);
+
+    return waiting;
+  }
+
+  public static LockServiceProtos.LockInfo toProtoLockInfo(LockInfo lock)
+  {
+    LockServiceProtos.LockInfo.Builder builder = LockServiceProtos.LockInfo.newBuilder();
+
+    builder
+        .setResourceType(toProtoResourceType(lock.getResourceType()))
+        .setResourceName(lock.getResourceName())
+        .setLockType(toProtoLockType(lock.getLockType()));
+
+    ProcedureInfo exclusiveLockOwnerProcedure = lock.getExclusiveLockOwnerProcedure();
+
+    if (exclusiveLockOwnerProcedure != null) {
+      Procedure exclusiveLockOwnerProcedureProto =
+          toProtoProcedure(lock.getExclusiveLockOwnerProcedure());
+      builder.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureProto);
+    }
+
+    builder.setSharedLockCount(lock.getSharedLockCount());
+
+    for (LockInfo.WaitingProcedure waitingProcedure : lock.getWaitingProcedures()) {
+      builder.addWaitingProcedures(toProtoWaitingProcedure(waitingProcedure));
+    }
+
+    return builder.build();
+  }
+
+  public static LockInfo toLockInfo(LockServiceProtos.LockInfo lockProto)
+  {
+    LockInfo lock = new LockInfo();
+
+    lock.setResourceType(toResourceType(lockProto.getResourceType()));
+    lock.setResourceName(lockProto.getResourceName());
+    lock.setLockType(toLockType(lockProto.getLockType()));
+
+    if (lockProto.hasExclusiveLockOwnerProcedure()) {
+      ProcedureInfo exclusiveLockOwnerProcedureProto =
+          toProcedureInfo(lockProto.getExclusiveLockOwnerProcedure());
+
+      lock.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureProto);
+    }
+
+    lock.setSharedLockCount(lockProto.getSharedLockCount());
+
+    for (LockServiceProtos.WaitingProcedure waitingProcedureProto : lockProto.getWaitingProceduresList()) {
+      lock.addWaitingProcedure(toWaitingProcedure(waitingProcedureProto));
+    }
+
+    return lock;
+  }
+}
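A minimal sketch (not part of the patch) of the round trip the converters above enable; the field values are illustrative. Note that toProtoLockInfo as written needs resourceName, resourceType, and lockType populated (the latter two back the required proto fields), while an absent exclusive owner simply stays unset:

// Hypothetical round-trip check for the LockInfo converters above.
LockInfo lock = new LockInfo();
lock.setResourceType(LockInfo.ResourceType.TABLE);
lock.setResourceName("ns:example_table");   // illustrative resource name
lock.setLockType(LockInfo.LockType.SHARED);
lock.setSharedLockCount(2);

LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
LockInfo back = ProtobufUtil.toLockInfo(proto);

assert back.getResourceType() == LockInfo.ResourceType.TABLE;
assert "ns:example_table".equals(back.getResourceName());
assert back.getSharedLockCount() == 2;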
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/procedure2/LockInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/procedure2/LockInfo.java
new file mode 100644
index 00000000000..30ecee8e0aa
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/procedure2/LockInfo.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Public
+public class LockInfo {
+  @InterfaceAudience.Public
+  public enum ResourceType {
+    SERVER, NAMESPACE, TABLE, REGION
+  }
+
+  @InterfaceAudience.Public
+  public enum LockType {
+    EXCLUSIVE, SHARED
+  }
+
+  @InterfaceAudience.Public
+  public static class WaitingProcedure {
+    private LockType lockType;
+    private ProcedureInfo procedure;
+
+    public WaitingProcedure() {
+    }
+
+    public LockType getLockType() {
+      return lockType;
+    }
+
+    public void setLockType(LockType lockType) {
+      this.lockType = lockType;
+    }
+
+    public ProcedureInfo getProcedure() {
+      return procedure;
+    }
+
+    public void setProcedure(ProcedureInfo procedure) {
+      this.procedure = procedure;
+    }
+  }
+
+  private ResourceType resourceType;
+  private String resourceName;
+  private LockType lockType;
+  private ProcedureInfo exclusiveLockOwnerProcedure;
+  private int sharedLockCount;
+  private final List<WaitingProcedure> waitingProcedures;
+
+  public LockInfo() {
+    waitingProcedures = new ArrayList<>();
+  }
+
+  public ResourceType getResourceType() {
+    return resourceType;
+  }
+
+  public void setResourceType(ResourceType resourceType) {
+    this.resourceType = resourceType;
+  }
+
+  public String getResourceName() {
+    return resourceName;
+  }
+
+  public void setResourceName(String resourceName) {
+    this.resourceName = resourceName;
+  }
+
+  public LockType getLockType() {
+    return lockType;
+  }
+
+  public void setLockType(LockType lockType) {
+    this.lockType = lockType;
+  }
+
+  public ProcedureInfo getExclusiveLockOwnerProcedure() {
+    return exclusiveLockOwnerProcedure;
+  }
+
+  public void setExclusiveLockOwnerProcedure(
+      ProcedureInfo exclusiveLockOwnerProcedure) {
+    this.exclusiveLockOwnerProcedure = exclusiveLockOwnerProcedure;
+  }
+
+  public int getSharedLockCount() {
+    return sharedLockCount;
+  }
+
+  public void setSharedLockCount(int sharedLockCount) {
+    this.sharedLockCount = sharedLockCount;
+  }
+
+  public List<WaitingProcedure> getWaitingProcedures() {
+    return waitingProcedures;
+  }
+
+  public void setWaitingProcedures(List<WaitingProcedure> waitingProcedures) {
+    this.waitingProcedures.clear();
+    this.waitingProcedures.addAll(waitingProcedures);
+  }
+
+  public void addWaitingProcedure(WaitingProcedure waitingProcedure) {
+    waitingProcedures.add(waitingProcedure);
+  }
+}
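LockInfo itself is a plain mutable holder, assembled field by field. A hypothetical example (all values illustrative) of describing an exclusive table lock with one shared waiter; the ProcedureInfo constructor arguments follow the order used by ProtobufUtil.toProcedureInfo above:

// Hypothetical waiter; ProcedureInfo args: procId, procName, owner, state,
// parentId (-1 for none), nonceKey, exception, lastUpdate, submittedTime, result.
long now = System.currentTimeMillis();
ProcedureInfo waiter = new ProcedureInfo(42L, "org.example.DummyProcedure", "alice",
    ProcedureState.RUNNABLE, -1, null, null, now, now, null);

LockInfo.WaitingProcedure waiting = new LockInfo.WaitingProcedure();
waiting.setLockType(LockInfo.LockType.SHARED);
waiting.setProcedure(waiter);

LockInfo tableLock = new LockInfo();
tableLock.setResourceType(LockInfo.ResourceType.TABLE);
tableLock.setResourceName("default:t1");
tableLock.setLockType(LockInfo.LockType.EXCLUSIVE);
tableLock.addWaitingProcedure(waiting);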
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
index e11c23ca14b..2c307b7c842 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
@@ -43,7 +43,7 @@ package org.apache.hadoop.hbase.procedure2;
  * We do not use ReentrantReadWriteLock directly because of its high memory overhead.
  */
 public class LockAndQueue extends ProcedureDeque implements LockStatus {
-  private long exclusiveLockProcIdOwner = Long.MIN_VALUE;
+  private Procedure exclusiveLockOwnerProcedure = null;
   private int sharedLock = 0;
 
   // ======================================================================
@@ -57,12 +57,12 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
 
   @Override
   public boolean hasExclusiveLock() {
-    return this.exclusiveLockProcIdOwner != Long.MIN_VALUE;
+    return this.exclusiveLockOwnerProcedure != null;
   }
 
   @Override
   public boolean isLockOwner(long procId) {
-    return exclusiveLockProcIdOwner == procId;
+    return getExclusiveLockProcIdOwner() == procId;
   }
 
   @Override
@@ -75,9 +75,18 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
     return isLockOwner(proc.getProcId()) || hasParentLock(proc);
   }
 
+  @Override
+  public Procedure getExclusiveLockOwnerProcedure() {
+    return exclusiveLockOwnerProcedure;
+  }
+
   @Override
   public long getExclusiveLockProcIdOwner() {
-    return exclusiveLockProcIdOwner;
+    if (exclusiveLockOwnerProcedure == null) {
+      return Long.MIN_VALUE;
+    } else {
+      return exclusiveLockOwnerProcedure.getProcId();
+    }
   }
 
   @Override
@@ -101,7 +110,7 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
   public boolean tryExclusiveLock(final Procedure proc) {
     if (isLocked()) return hasLockAccess(proc);
-    exclusiveLockProcIdOwner = proc.getProcId();
+    exclusiveLockOwnerProcedure = proc;
     return true;
   }
 
@@ -110,7 +119,7 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
    */
   public boolean releaseExclusiveLock(final Procedure proc) {
     if (isLockOwner(proc.getProcId())) {
-      exclusiveLockProcIdOwner = Long.MIN_VALUE;
+      exclusiveLockOwnerProcedure = null;
       return true;
     }
     return false;
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java
index 9f2aae7a1e1..f32ef763ee0 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java
@@ -29,6 +29,7 @@ public interface LockStatus {
   boolean isLockOwner(long procId);
   boolean hasParentLock(final Procedure proc);
   boolean hasLockAccess(final Procedure proc);
+  Procedure getExclusiveLockOwnerProcedure();
   long getExclusiveLockProcIdOwner();
   int getSharedLockCount();
 }
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index 617532bb1bc..b5295e7fb07 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.procedure2;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -120,6 +121,12 @@ public interface ProcedureScheduler {
    */
   boolean waitEvent(ProcedureEvent event, Procedure procedure);
 
+  /**
+   * List lock queues.
+   * @return the locks
+   */
+  List<LockInfo> listLocks();
+
   /**
    * Returns the number of elements in this queue.
    * @return the number of elements in this queue.
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
index b4222c7a1c4..7ce75685c00 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.procedure2;
 
-import com.google.common.base.Preconditions;
-
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Modifier;
@@ -33,6 +31,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.NonceKey;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Helper to convert to/from ProcedureProtos
  */
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java
index 788f4ff4593..176a9003f0a 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java
@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.hbase.procedure2;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.util.Collections;
+import java.util.List;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Simple scheduler for procedures
  */
@@ -73,4 +76,9 @@ public class SimpleProcedureScheduler extends AbstractProcedureScheduler {
   @Override
   public void completionCleanup(Procedure proc) {
   }
+
+  @Override
+  public List<LockInfo> listLocks() {
+    return Collections.emptyList();
+  }
 }
\ No newline at end of file
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
index 6dbf9b281ab..99853a5687a 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
@@
-104,6 +104,114 @@ public final class LockServiceProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.LockType) } + /** + * Protobuf enum {@code hbase.pb.ResourceType} + */ + public enum ResourceType + implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum { + /** + * RESOURCE_TYPE_SERVER = 1; + */ + RESOURCE_TYPE_SERVER(1), + /** + * RESOURCE_TYPE_NAMESPACE = 2; + */ + RESOURCE_TYPE_NAMESPACE(2), + /** + * RESOURCE_TYPE_TABLE = 3; + */ + RESOURCE_TYPE_TABLE(3), + /** + * RESOURCE_TYPE_REGION = 4; + */ + RESOURCE_TYPE_REGION(4), + ; + + /** + * RESOURCE_TYPE_SERVER = 1; + */ + public static final int RESOURCE_TYPE_SERVER_VALUE = 1; + /** + * RESOURCE_TYPE_NAMESPACE = 2; + */ + public static final int RESOURCE_TYPE_NAMESPACE_VALUE = 2; + /** + * RESOURCE_TYPE_TABLE = 3; + */ + public static final int RESOURCE_TYPE_TABLE_VALUE = 3; + /** + * RESOURCE_TYPE_REGION = 4; + */ + public static final int RESOURCE_TYPE_REGION_VALUE = 4; + + + public final int getNumber() { + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ResourceType valueOf(int value) { + return forNumber(value); + } + + public static ResourceType forNumber(int value) { + switch (value) { + case 1: return RESOURCE_TYPE_SERVER; + case 2: return RESOURCE_TYPE_NAMESPACE; + case 3: return RESOURCE_TYPE_TABLE; + case 4: return RESOURCE_TYPE_REGION; + default: return null; + } + } + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap< + ResourceType> internalValueMap = + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap() { + public ResourceType findValueByNumber(int number) { + return ResourceType.forNumber(number); + } + }; + + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final ResourceType[] VALUES = values(); + + public static ResourceType valueOf( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ResourceType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.ResourceType) + } + public interface LockRequestOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.LockRequest) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -4898,6 +5006,2129 @@ public final class LockServiceProtos { } + public interface WaitingProcedureOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.WaitingProcedure) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.LockType lock_type = 1; + */ + 
boolean hasLockType(); + /** + * required .hbase.pb.LockType lock_type = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType(); + + /** + * required .hbase.pb.Procedure procedure = 2; + */ + boolean hasProcedure(); + /** + * required .hbase.pb.Procedure procedure = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getProcedure(); + /** + * required .hbase.pb.Procedure procedure = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.WaitingProcedure} + */ + public static final class WaitingProcedure extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.WaitingProcedure) + WaitingProcedureOrBuilder { + // Use WaitingProcedure.newBuilder() to construct. + private WaitingProcedure(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private WaitingProcedure() { + lockType_ = 1; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private WaitingProcedure( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + lockType_ = rawValue; + } + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = procedure_.toBuilder(); + } + procedure_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(procedure_); + procedure_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder.class); + } + + private int bitField0_; + public static final int LOCK_TYPE_FIELD_NUMBER = 1; + private int lockType_; + /** + * required .hbase.pb.LockType lock_type = 1; + */ + public boolean hasLockType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.LockType lock_type = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_); + return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result; + } + + public static final int PROCEDURE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure procedure_; + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public boolean hasProcedure() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getProcedure() { + return procedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : procedure_; + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder() { + return procedure_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : procedure_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasLockType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasProcedure()) { + memoizedIsInitialized = 0; + return false; + } + if (!getProcedure().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, lockType_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, getProcedure()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeEnumSize(1, lockType_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getProcedure()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure other = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure) obj; + + boolean result = true; + result = result && (hasLockType() == other.hasLockType()); + if (hasLockType()) { + result = result && lockType_ == other.lockType_; + } + result = result && (hasProcedure() == other.hasProcedure()); + if (hasProcedure()) { + result = result && getProcedure() + .equals(other.getProcedure()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasLockType()) { + hash = (37 * hash) + LOCK_TYPE_FIELD_NUMBER; + hash = (53 * hash) + lockType_; + } + if (hasProcedure()) { + hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; + hash = (53 * hash) + getProcedure().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.WaitingProcedure} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.WaitingProcedure) + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getProcedureFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + lockType_ = 1; + bitField0_ = (bitField0_ & ~0x00000001); + if (procedureBuilder_ == null) { + procedure_ = null; + } else { + procedureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure result = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lockType_ = lockType_; + if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (procedureBuilder_ == null) { + result.procedure_ = procedure_; + } else { + result.procedure_ = procedureBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.getDefaultInstance()) return this; + if (other.hasLockType()) { + setLockType(other.getLockType()); + } + if (other.hasProcedure()) { + mergeProcedure(other.getProcedure()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasLockType()) { + return false; + } + if (!hasProcedure()) { + return false; + } + if (!getProcedure().isInitialized()) { + return false; + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private int lockType_ = 1; + /** + * required .hbase.pb.LockType lock_type = 1; + */ + public boolean hasLockType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.LockType lock_type = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() { + 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_); + return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result; + } + /** + * required .hbase.pb.LockType lock_type = 1; + */ + public Builder setLockType(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + lockType_ = value.getNumber(); + onChanged(); + return this; + } + /** + * required .hbase.pb.LockType lock_type = 1; + */ + public Builder clearLockType() { + bitField0_ = (bitField0_ & ~0x00000001); + lockType_ = 1; + onChanged(); + return this; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure procedure_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> procedureBuilder_; + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public boolean hasProcedure() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getProcedure() { + if (procedureBuilder_ == null) { + return procedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : procedure_; + } else { + return procedureBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public Builder setProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + procedure_ = value; + onChanged(); + } else { + procedureBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public Builder setProcedure( + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + procedure_ = builderForValue.build(); + onChanged(); + } else { + procedureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public Builder mergeProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + procedure_ != null && + procedure_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()) { + procedure_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.newBuilder(procedure_).mergeFrom(value).buildPartial(); + } else { + procedure_ = value; + } + onChanged(); + } else { + procedureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public Builder clearProcedure() { + if (procedureBuilder_ == null) { + procedure_ = null; + onChanged(); + } else { + 
procedureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder getProcedureBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getProcedureFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder() { + if (procedureBuilder_ != null) { + return procedureBuilder_.getMessageOrBuilder(); + } else { + return procedure_ == null ? + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : procedure_; + } + } + /** + * required .hbase.pb.Procedure procedure = 2; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getProcedureFieldBuilder() { + if (procedureBuilder_ == null) { + procedureBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>( + getProcedure(), + getParentForChildren(), + isClean()); + procedure_ = null; + } + return procedureBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.WaitingProcedure) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.WaitingProcedure) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public WaitingProcedure parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new WaitingProcedure(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface LockInfoOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.LockInfo) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.ResourceType resource_type = 1; + */ + boolean hasResourceType(); + /** + * required .hbase.pb.ResourceType resource_type = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType getResourceType(); + + /** + * optional string resource_name = 2; + */ + boolean hasResourceName(); + /** + * optional string resource_name = 2; + */ + java.lang.String getResourceName(); + /** + * optional string resource_name = 2; + */ + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getResourceNameBytes(); + + /** + * required .hbase.pb.LockType lock_type = 3; + */ + boolean hasLockType(); + /** + * required .hbase.pb.LockType lock_type = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType(); + + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + boolean hasExclusiveLockOwnerProcedure(); + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getExclusiveLockOwnerProcedure(); + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getExclusiveLockOwnerProcedureOrBuilder(); + + /** + * optional int32 shared_lock_count = 5; + */ + boolean hasSharedLockCount(); + /** + * optional int32 shared_lock_count = 5; + */ + int getSharedLockCount(); + + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + java.util.List + getWaitingProceduresList(); + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getWaitingProcedures(int index); + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + int getWaitingProceduresCount(); + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + java.util.List + getWaitingProceduresOrBuilderList(); + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder getWaitingProceduresOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.LockInfo} + */ + public static final class LockInfo extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.LockInfo) + LockInfoOrBuilder { + // Use LockInfo.newBuilder() to construct. 
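// --- Editorial sketch (not part of this patch; values illustrative) ---
// Assembling the new message through the generated builder, e.g. for an
// exclusive table lock (the resource name here is made up):
//
//   LockServiceProtos.LockInfo info = LockServiceProtos.LockInfo.newBuilder()
//       .setResourceType(LockServiceProtos.ResourceType.RESOURCE_TYPE_TABLE)
//       .setResourceName("default:test_table")
//       .setLockType(LockServiceProtos.LockType.EXCLUSIVE)
//       .setExclusiveLockOwnerProcedure(ownerProc)  // some hbase.pb.Procedure
//       .build();
//
// resource_type and lock_type are `required`, so build() throws
// UninitializedMessageException if either is left unset.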
+ private LockInfo(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private LockInfo() { + resourceType_ = 1; + resourceName_ = ""; + lockType_ = 1; + sharedLockCount_ = 0; + waitingProcedures_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private LockInfo( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType value = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + resourceType_ = rawValue; + } + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000002; + resourceName_ = bs; + break; + } + case 24: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(3, rawValue); + } else { + bitField0_ |= 0x00000004; + lockType_ = rawValue; + } + break; + } + case 34: { + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = exclusiveLockOwnerProcedure_.toBuilder(); + } + exclusiveLockOwnerProcedure_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(exclusiveLockOwnerProcedure_); + exclusiveLockOwnerProcedure_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 40: { + bitField0_ |= 0x00000010; + sharedLockCount_ = input.readInt32(); + break; + } + case 50: { + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + waitingProcedures_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000020; + } + waitingProcedures_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + waitingProcedures_ = 
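// --- Editorial note (not part of this patch) ---
// The case labels in the switch above are protobuf wire tags,
// tag = (field_number << 3) | wire_type:
//   case  8 -> field 1 resource_type                   (1<<3 | 0, varint enum)
//   case 18 -> field 2 resource_name                   (2<<3 | 2, length-delimited)
//   case 24 -> field 3 lock_type                       (3<<3 | 0, varint enum)
//   case 34 -> field 4 exclusive_lock_owner_procedure  (4<<3 | 2, sub-message)
//   case 40 -> field 5 shared_lock_count               (5<<3 | 0, varint int32)
//   case 50 -> field 6 waitingProcedures               (6<<3 | 2, sub-message)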
java.util.Collections.unmodifiableList(waitingProcedures_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder.class); + } + + private int bitField0_; + public static final int RESOURCE_TYPE_FIELD_NUMBER = 1; + private int resourceType_; + /** + * required .hbase.pb.ResourceType resource_type = 1; + */ + public boolean hasResourceType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ResourceType resource_type = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType getResourceType() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.valueOf(resourceType_); + return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.RESOURCE_TYPE_SERVER : result; + } + + public static final int RESOURCE_NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object resourceName_; + /** + * optional string resource_name = 2; + */ + public boolean hasResourceName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string resource_name = 2; + */ + public java.lang.String getResourceName() { + java.lang.Object ref = resourceName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + resourceName_ = s; + } + return s; + } + } + /** + * optional string resource_name = 2; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getResourceNameBytes() { + java.lang.Object ref = resourceName_; + if (ref instanceof java.lang.String) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + resourceName_ = b; + return b; + } else { + return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + } + } + + public static final int LOCK_TYPE_FIELD_NUMBER = 3; + private int lockType_; + /** + * required .hbase.pb.LockType lock_type = 3; + */ + public boolean hasLockType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.LockType lock_type = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_); + return result == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result; + } + + public static final int EXCLUSIVE_LOCK_OWNER_PROCEDURE_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure exclusiveLockOwnerProcedure_; + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public boolean hasExclusiveLockOwnerProcedure() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getExclusiveLockOwnerProcedure() { + return exclusiveLockOwnerProcedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : exclusiveLockOwnerProcedure_; + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getExclusiveLockOwnerProcedureOrBuilder() { + return exclusiveLockOwnerProcedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : exclusiveLockOwnerProcedure_; + } + + public static final int SHARED_LOCK_COUNT_FIELD_NUMBER = 5; + private int sharedLockCount_; + /** + * optional int32 shared_lock_count = 5; + */ + public boolean hasSharedLockCount() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional int32 shared_lock_count = 5; + */ + public int getSharedLockCount() { + return sharedLockCount_; + } + + public static final int WAITINGPROCEDURES_FIELD_NUMBER = 6; + private java.util.List waitingProcedures_; + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public java.util.List getWaitingProceduresList() { + return waitingProcedures_; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public java.util.List + getWaitingProceduresOrBuilderList() { + return waitingProcedures_; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public int getWaitingProceduresCount() { + return waitingProcedures_.size(); + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getWaitingProcedures(int index) { + return waitingProcedures_.get(index); + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder getWaitingProceduresOrBuilder( + int index) { + return waitingProcedures_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasResourceType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasLockType()) { + memoizedIsInitialized = 0; + return false; + } + if (hasExclusiveLockOwnerProcedure()) { + if (!getExclusiveLockOwnerProcedure().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getWaitingProceduresCount(); i++) { + if (!getWaitingProcedures(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, resourceType_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 2, resourceName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(3, lockType_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, getExclusiveLockOwnerProcedure()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeInt32(5, sharedLockCount_); + } + for (int i = 0; i < waitingProcedures_.size(); i++) { + output.writeMessage(6, waitingProcedures_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeEnumSize(1, resourceType_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(2, resourceName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeEnumSize(3, lockType_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getExclusiveLockOwnerProcedure()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeInt32Size(5, sharedLockCount_); + } + for (int i = 0; i < waitingProcedures_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(6, waitingProcedures_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo other = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo) obj; + + boolean result = true; + result = result && (hasResourceType() == other.hasResourceType()); + if (hasResourceType()) { + result = result && resourceType_ == other.resourceType_; + } + result = result && (hasResourceName() == other.hasResourceName()); + if (hasResourceName()) { + result = result && getResourceName() + .equals(other.getResourceName()); + } + result = result && (hasLockType() == other.hasLockType()); + if (hasLockType()) { + result = result && lockType_ == other.lockType_; + } + result = result && (hasExclusiveLockOwnerProcedure() == other.hasExclusiveLockOwnerProcedure()); + if (hasExclusiveLockOwnerProcedure()) { + result = result && getExclusiveLockOwnerProcedure() + .equals(other.getExclusiveLockOwnerProcedure()); + } + result = result && (hasSharedLockCount() == other.hasSharedLockCount()); + if (hasSharedLockCount()) { + result = result && (getSharedLockCount() + == other.getSharedLockCount()); + } + result = result && 
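// --- Editorial note (not part of this patch) ---
// writeTo() above emits the set fields in ascending field-number order (1..6),
// giving a deterministic byte layout for a given message, and
// getSerializedSize() caches its result in memoizedSize, so repeatedly
// serializing the same immutable message only walks the fields once.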
getWaitingProceduresList() + .equals(other.getWaitingProceduresList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasResourceType()) { + hash = (37 * hash) + RESOURCE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + resourceType_; + } + if (hasResourceName()) { + hash = (37 * hash) + RESOURCE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getResourceName().hashCode(); + } + if (hasLockType()) { + hash = (37 * hash) + LOCK_TYPE_FIELD_NUMBER; + hash = (53 * hash) + lockType_; + } + if (hasExclusiveLockOwnerProcedure()) { + hash = (37 * hash) + EXCLUSIVE_LOCK_OWNER_PROCEDURE_FIELD_NUMBER; + hash = (53 * hash) + getExclusiveLockOwnerProcedure().hashCode(); + } + if (hasSharedLockCount()) { + hash = (37 * hash) + SHARED_LOCK_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getSharedLockCount(); + } + if (getWaitingProceduresCount() > 0) { + hash = (37 * hash) + WAITINGPROCEDURES_FIELD_NUMBER; + hash = (53 * hash) + getWaitingProceduresList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + 
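// --- Editorial sketch (not part of this patch) ---
// The parseFrom()/parseDelimitedFrom() family here gives the usual
// serialize/parse round trip, e.g.:
//
//   byte[] bytes = info.toByteArray();
//   LockServiceProtos.LockInfo copy = LockServiceProtos.LockInfo.parseFrom(bytes);
//   assert copy.equals(info);
//
// parseFrom() throws InvalidProtocolBufferException on malformed input,
// including when a required field is missing.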
.parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.LockInfo} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.LockInfo) + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + 
getExclusiveLockOwnerProcedureFieldBuilder(); + getWaitingProceduresFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + resourceType_ = 1; + bitField0_ = (bitField0_ & ~0x00000001); + resourceName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + lockType_ = 1; + bitField0_ = (bitField0_ & ~0x00000004); + if (exclusiveLockOwnerProcedureBuilder_ == null) { + exclusiveLockOwnerProcedure_ = null; + } else { + exclusiveLockOwnerProcedureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + sharedLockCount_ = 0; + bitField0_ = (bitField0_ & ~0x00000010); + if (waitingProceduresBuilder_ == null) { + waitingProcedures_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + } else { + waitingProceduresBuilder_.clear(); + } + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo result = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.resourceType_ = resourceType_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.resourceName_ = resourceName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.lockType_ = lockType_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (exclusiveLockOwnerProcedureBuilder_ == null) { + result.exclusiveLockOwnerProcedure_ = exclusiveLockOwnerProcedure_; + } else { + result.exclusiveLockOwnerProcedure_ = exclusiveLockOwnerProcedureBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.sharedLockCount_ = sharedLockCount_; + if (waitingProceduresBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { + waitingProcedures_ = java.util.Collections.unmodifiableList(waitingProcedures_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.waitingProcedures_ = waitingProcedures_; + } else { + result.waitingProcedures_ = waitingProceduresBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor 
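// --- Editorial note (not part of this patch) ---
// build() above delegates to buildPartial() and then enforces isInitialized();
// buildPartial() alone is handy in tests for creating messages with required
// fields deliberately unset:
//
//   LockInfo partial = LockInfo.newBuilder().buildPartial(); // isInitialized() == false
//   LockInfo.newBuilder().build();  // throws UninitializedMessageException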
field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.getDefaultInstance()) return this; + if (other.hasResourceType()) { + setResourceType(other.getResourceType()); + } + if (other.hasResourceName()) { + bitField0_ |= 0x00000002; + resourceName_ = other.resourceName_; + onChanged(); + } + if (other.hasLockType()) { + setLockType(other.getLockType()); + } + if (other.hasExclusiveLockOwnerProcedure()) { + mergeExclusiveLockOwnerProcedure(other.getExclusiveLockOwnerProcedure()); + } + if (other.hasSharedLockCount()) { + setSharedLockCount(other.getSharedLockCount()); + } + if (waitingProceduresBuilder_ == null) { + if (!other.waitingProcedures_.isEmpty()) { + if (waitingProcedures_.isEmpty()) { + waitingProcedures_ = other.waitingProcedures_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureWaitingProceduresIsMutable(); + waitingProcedures_.addAll(other.waitingProcedures_); + } + onChanged(); + } + } else { + if (!other.waitingProcedures_.isEmpty()) { + if (waitingProceduresBuilder_.isEmpty()) { + waitingProceduresBuilder_.dispose(); + waitingProceduresBuilder_ = null; + waitingProcedures_ = other.waitingProcedures_; + bitField0_ = (bitField0_ & ~0x00000020); + waitingProceduresBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getWaitingProceduresFieldBuilder() : null; + } else { + waitingProceduresBuilder_.addAllMessages(other.waitingProcedures_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasResourceType()) { + return false; + } + if (!hasLockType()) { + return false; + } + if (hasExclusiveLockOwnerProcedure()) { + if (!getExclusiveLockOwnerProcedure().isInitialized()) { + return false; + } + } + for (int i = 0; i < getWaitingProceduresCount(); i++) { + if (!getWaitingProcedures(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private int resourceType_ = 1; + /** + * required .hbase.pb.ResourceType resource_type = 1; + */ + public boolean hasResourceType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ResourceType resource_type = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType getResourceType() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.valueOf(resourceType_); + return result == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.RESOURCE_TYPE_SERVER : result; + } + /** + * required .hbase.pb.ResourceType resource_type = 1; + */ + public Builder setResourceType(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + resourceType_ = value.getNumber(); + onChanged(); + return this; + } + /** + * required .hbase.pb.ResourceType resource_type = 1; + */ + public Builder clearResourceType() { + bitField0_ = (bitField0_ & ~0x00000001); + resourceType_ = 1; + onChanged(); + return this; + } + + private java.lang.Object resourceName_ = ""; + /** + * optional string resource_name = 2; + */ + public boolean hasResourceName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string resource_name = 2; + */ + public java.lang.String getResourceName() { + java.lang.Object ref = resourceName_; + if (!(ref instanceof java.lang.String)) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + resourceName_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string resource_name = 2; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getResourceNameBytes() { + java.lang.Object ref = resourceName_; + if (ref instanceof String) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + resourceName_ = b; + return b; + } else { + return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + } + } + /** + * optional string resource_name = 2; + */ + public Builder setResourceName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + resourceName_ = value; + onChanged(); + return this; + } + /** + * optional string resource_name = 2; + */ + public Builder clearResourceName() { + bitField0_ = (bitField0_ & ~0x00000002); + resourceName_ = getDefaultInstance().getResourceName(); + onChanged(); + return this; + } + /** + * optional string resource_name = 2; + */ + public Builder setResourceNameBytes( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + resourceName_ = value; + onChanged(); + return this; + } + + private int lockType_ = 1; + /** + * required .hbase.pb.LockType lock_type = 3; + */ + public boolean hasLockType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.LockType lock_type = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() { + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_); + return result == null ? 
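// --- Editorial note (not part of this patch) ---
// resourceName_ above is stored as either a String or a ByteString: the wire
// parser writes the raw ByteString, getResourceName() caches the decoded
// String on first access (only when it is valid UTF-8), and callers that want
// the raw bytes use getResourceNameBytes() and skip the decode entirely.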
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result; + } + /** + * required .hbase.pb.LockType lock_type = 3; + */ + public Builder setLockType(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + lockType_ = value.getNumber(); + onChanged(); + return this; + } + /** + * required .hbase.pb.LockType lock_type = 3; + */ + public Builder clearLockType() { + bitField0_ = (bitField0_ & ~0x00000004); + lockType_ = 1; + onChanged(); + return this; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure exclusiveLockOwnerProcedure_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> exclusiveLockOwnerProcedureBuilder_; + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public boolean hasExclusiveLockOwnerProcedure() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getExclusiveLockOwnerProcedure() { + if (exclusiveLockOwnerProcedureBuilder_ == null) { + return exclusiveLockOwnerProcedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : exclusiveLockOwnerProcedure_; + } else { + return exclusiveLockOwnerProcedureBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public Builder setExclusiveLockOwnerProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure value) { + if (exclusiveLockOwnerProcedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + exclusiveLockOwnerProcedure_ = value; + onChanged(); + } else { + exclusiveLockOwnerProcedureBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public Builder setExclusiveLockOwnerProcedure( + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (exclusiveLockOwnerProcedureBuilder_ == null) { + exclusiveLockOwnerProcedure_ = builderForValue.build(); + onChanged(); + } else { + exclusiveLockOwnerProcedureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public Builder mergeExclusiveLockOwnerProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure value) { + if (exclusiveLockOwnerProcedureBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + exclusiveLockOwnerProcedure_ != null && + exclusiveLockOwnerProcedure_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()) { + exclusiveLockOwnerProcedure_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.newBuilder(exclusiveLockOwnerProcedure_).mergeFrom(value).buildPartial(); + } else { + exclusiveLockOwnerProcedure_ = value; + } + 
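// --- Editorial note (not part of this patch) ---
// Proto2 merge semantics: when the field is already set,
// mergeExclusiveLockOwnerProcedure() field-merges the two Procedure messages
// via newBuilder(existing).mergeFrom(incoming) rather than overwriting;
// setExclusiveLockOwnerProcedure() is the replace-only variant.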
onChanged(); + } else { + exclusiveLockOwnerProcedureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public Builder clearExclusiveLockOwnerProcedure() { + if (exclusiveLockOwnerProcedureBuilder_ == null) { + exclusiveLockOwnerProcedure_ = null; + onChanged(); + } else { + exclusiveLockOwnerProcedureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder getExclusiveLockOwnerProcedureBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getExclusiveLockOwnerProcedureFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getExclusiveLockOwnerProcedureOrBuilder() { + if (exclusiveLockOwnerProcedureBuilder_ != null) { + return exclusiveLockOwnerProcedureBuilder_.getMessageOrBuilder(); + } else { + return exclusiveLockOwnerProcedure_ == null ? + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : exclusiveLockOwnerProcedure_; + } + } + /** + * optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getExclusiveLockOwnerProcedureFieldBuilder() { + if (exclusiveLockOwnerProcedureBuilder_ == null) { + exclusiveLockOwnerProcedureBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>( + getExclusiveLockOwnerProcedure(), + getParentForChildren(), + isClean()); + exclusiveLockOwnerProcedure_ = null; + } + return exclusiveLockOwnerProcedureBuilder_; + } + + private int sharedLockCount_ ; + /** + * optional int32 shared_lock_count = 5; + */ + public boolean hasSharedLockCount() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional int32 shared_lock_count = 5; + */ + public int getSharedLockCount() { + return sharedLockCount_; + } + /** + * optional int32 shared_lock_count = 5; + */ + public Builder setSharedLockCount(int value) { + bitField0_ |= 0x00000010; + sharedLockCount_ = value; + onChanged(); + return this; + } + /** + * optional int32 shared_lock_count = 5; + */ + public Builder clearSharedLockCount() { + bitField0_ = (bitField0_ & ~0x00000010); + sharedLockCount_ = 0; + onChanged(); + return this; + } + + private java.util.List waitingProcedures_ = + java.util.Collections.emptyList(); + private void ensureWaitingProceduresIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + waitingProcedures_ = new java.util.ArrayList(waitingProcedures_); + bitField0_ |= 0x00000020; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder> waitingProceduresBuilder_; + + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public java.util.List getWaitingProceduresList() { + if (waitingProceduresBuilder_ == null) { + return java.util.Collections.unmodifiableList(waitingProcedures_); + } else { + return waitingProceduresBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public int getWaitingProceduresCount() { + if (waitingProceduresBuilder_ == null) { + return waitingProcedures_.size(); + } else { + return waitingProceduresBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getWaitingProcedures(int index) { + if (waitingProceduresBuilder_ == null) { + return waitingProcedures_.get(index); + } else { + return waitingProceduresBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder setWaitingProcedures( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure value) { + if (waitingProceduresBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureWaitingProceduresIsMutable(); + waitingProcedures_.set(index, value); + onChanged(); + } else { + waitingProceduresBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder setWaitingProcedures( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder builderForValue) { + if (waitingProceduresBuilder_ == null) { + ensureWaitingProceduresIsMutable(); + waitingProcedures_.set(index, builderForValue.build()); + onChanged(); + } else { + waitingProceduresBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder addWaitingProcedures(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure value) { + if (waitingProceduresBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureWaitingProceduresIsMutable(); + waitingProcedures_.add(value); + onChanged(); + } else { + waitingProceduresBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder addWaitingProcedures( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure value) { + if (waitingProceduresBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureWaitingProceduresIsMutable(); + waitingProcedures_.add(index, value); + onChanged(); + } else { + waitingProceduresBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder addWaitingProcedures( + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder builderForValue) { + if (waitingProceduresBuilder_ == null) { + ensureWaitingProceduresIsMutable(); + 
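// --- Editorial sketch (not part of this patch; values illustrative) ---
// Populating the repeated waitingProcedures field through the generated
// mutators, e.g. a namespace lock held shared by two readers with one
// exclusive waiter:
//
//   LockServiceProtos.LockInfo.Builder b = LockServiceProtos.LockInfo.newBuilder()
//       .setResourceType(LockServiceProtos.ResourceType.RESOURCE_TYPE_NAMESPACE)
//       .setLockType(LockServiceProtos.LockType.SHARED)
//       .setSharedLockCount(2);
//   b.addWaitingProcedures(LockServiceProtos.WaitingProcedure.newBuilder()
//       .setLockType(LockServiceProtos.LockType.EXCLUSIVE)
//       .setProcedure(waiterProc)  // some hbase.pb.Procedure message
//       .build());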
waitingProcedures_.add(builderForValue.build()); + onChanged(); + } else { + waitingProceduresBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder addWaitingProcedures( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder builderForValue) { + if (waitingProceduresBuilder_ == null) { + ensureWaitingProceduresIsMutable(); + waitingProcedures_.add(index, builderForValue.build()); + onChanged(); + } else { + waitingProceduresBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder addAllWaitingProcedures( + java.lang.Iterable values) { + if (waitingProceduresBuilder_ == null) { + ensureWaitingProceduresIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, waitingProcedures_); + onChanged(); + } else { + waitingProceduresBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder clearWaitingProcedures() { + if (waitingProceduresBuilder_ == null) { + waitingProcedures_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + waitingProceduresBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public Builder removeWaitingProcedures(int index) { + if (waitingProceduresBuilder_ == null) { + ensureWaitingProceduresIsMutable(); + waitingProcedures_.remove(index); + onChanged(); + } else { + waitingProceduresBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder getWaitingProceduresBuilder( + int index) { + return getWaitingProceduresFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder getWaitingProceduresOrBuilder( + int index) { + if (waitingProceduresBuilder_ == null) { + return waitingProcedures_.get(index); } else { + return waitingProceduresBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public java.util.List + getWaitingProceduresOrBuilderList() { + if (waitingProceduresBuilder_ != null) { + return waitingProceduresBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(waitingProcedures_); + } + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder addWaitingProceduresBuilder() { + return getWaitingProceduresFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.getDefaultInstance()); + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder addWaitingProceduresBuilder( + int index) { + return getWaitingProceduresFieldBuilder().addBuilder( + index, 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.getDefaultInstance()); + } + /** + * repeated .hbase.pb.WaitingProcedure waitingProcedures = 6; + */ + public java.util.List + getWaitingProceduresBuilderList() { + return getWaitingProceduresFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder> + getWaitingProceduresFieldBuilder() { + if (waitingProceduresBuilder_ == null) { + waitingProceduresBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder>( + waitingProcedures_, + ((bitField0_ & 0x00000020) == 0x00000020), + getParentForChildren(), + isClean()); + waitingProcedures_ = null; + } + return waitingProceduresBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.LockInfo) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.LockInfo) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public LockInfo parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new LockInfo(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + /** * Protobuf service {@code hbase.pb.LockService} */ @@ -5242,6 +7473,16 @@ public final class LockServiceProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_LockProcedureData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor 
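// --- Editorial note (not part of this patch) ---
// The escaped descriptorData string below is the serialized
// FileDescriptorProto for LockService.proto. The additions it carries
// correspond to roughly this .proto source, reconstructed from the generated
// accessors:
//
//   message WaitingProcedure {
//     required LockType lock_type = 1;
//     required Procedure procedure = 2;
//   }
//   message LockInfo {
//     required ResourceType resource_type = 1;
//     optional string resource_name = 2;
//     required LockType lock_type = 3;
//     optional Procedure exclusive_lock_owner_procedure = 4;
//     optional int32 shared_lock_count = 5;
//     repeated WaitingProcedure waitingProcedures = 6;
//   }
//   enum ResourceType {
//     RESOURCE_TYPE_SERVER = 1;
//     RESOURCE_TYPE_NAMESPACE = 2;
//     RESOURCE_TYPE_TABLE = 3;
//     RESOURCE_TYPE_REGION = 4;
//   }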
+ internal_static_hbase_pb_WaitingProcedure_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_WaitingProcedure_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_LockInfo_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_LockInfo_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -5252,31 +7493,43 @@ public final class LockServiceProtos { static { java.lang.String[] descriptorData = { "\n\021LockService.proto\022\010hbase.pb\032\013HBase.pro" + - "to\"\332\001\n\013LockRequest\022%\n\tlock_type\030\001 \002(\0162\022." + - "hbase.pb.LockType\022\021\n\tnamespace\030\002 \001(\t\022\'\n\n" + - "table_name\030\003 \001(\0132\023.hbase.pb.TableName\022)\n" + - "\013region_info\030\004 \003(\0132\024.hbase.pb.RegionInfo" + - "\022\023\n\013description\030\005 \001(\t\022\026\n\013nonce_group\030\006 \001" + - "(\004:\0010\022\020\n\005nonce\030\007 \001(\004:\0010\"\037\n\014LockResponse\022" + - "\017\n\007proc_id\030\001 \002(\004\"A\n\024LockHeartbeatRequest" + - "\022\017\n\007proc_id\030\001 \002(\004\022\030\n\nkeep_alive\030\002 \001(\010:\004t" + - "rue\"\224\001\n\025LockHeartbeatResponse\022?\n\013lock_st", - "atus\030\001 \002(\0162*.hbase.pb.LockHeartbeatRespo" + - "nse.LockStatus\022\022\n\ntimeout_ms\030\002 \001(\r\"&\n\nLo" + - "ckStatus\022\014\n\010UNLOCKED\020\001\022\n\n\006LOCKED\020\002\"\325\001\n\021L" + - "ockProcedureData\022%\n\tlock_type\030\001 \002(\0162\022.hb" + - "ase.pb.LockType\022\021\n\tnamespace\030\002 \001(\t\022\'\n\nta" + - "ble_name\030\003 \001(\0132\023.hbase.pb.TableName\022)\n\013r" + - "egion_info\030\004 \003(\0132\024.hbase.pb.RegionInfo\022\023" + - "\n\013description\030\005 \001(\t\022\035\n\016is_master_lock\030\006 " + - "\001(\010:\005false*%\n\010LockType\022\r\n\tEXCLUSIVE\020\001\022\n\n" + - "\006SHARED\020\0022\235\001\n\013LockService\022<\n\013RequestLock", - "\022\025.hbase.pb.LockRequest\032\026.hbase.pb.LockR" + - "esponse\022P\n\rLockHeartbeat\022\036.hbase.pb.Lock" + - "HeartbeatRequest\032\037.hbase.pb.LockHeartbea" + - "tResponseBN\n1org.apache.hadoop.hbase.sha" + - "ded.protobuf.generatedB\021LockServiceProto" + - "sH\001\210\001\001\240\001\001" + "to\032\017Procedure.proto\"\332\001\n\013LockRequest\022%\n\tl" + + "ock_type\030\001 \002(\0162\022.hbase.pb.LockType\022\021\n\tna" + + "mespace\030\002 \001(\t\022\'\n\ntable_name\030\003 \001(\0132\023.hbas" + + "e.pb.TableName\022)\n\013region_info\030\004 \003(\0132\024.hb" + + "ase.pb.RegionInfo\022\023\n\013description\030\005 \001(\t\022\026" + + "\n\013nonce_group\030\006 \001(\004:\0010\022\020\n\005nonce\030\007 \001(\004:\0010" + + "\"\037\n\014LockResponse\022\017\n\007proc_id\030\001 \002(\004\"A\n\024Loc" + + "kHeartbeatRequest\022\017\n\007proc_id\030\001 \002(\004\022\030\n\nke" + + "ep_alive\030\002 \001(\010:\004true\"\224\001\n\025LockHeartbeatRe", + "sponse\022?\n\013lock_status\030\001 \002(\0162*.hbase.pb.L" + + "ockHeartbeatResponse.LockStatus\022\022\n\ntimeo" + + "ut_ms\030\002 \001(\r\"&\n\nLockStatus\022\014\n\010UNLOCKED\020\001\022" + + "\n\n\006LOCKED\020\002\"\325\001\n\021LockProcedureData\022%\n\tloc" + + "k_type\030\001 \002(\0162\022.hbase.pb.LockType\022\021\n\tname" 
+ + "space\030\002 \001(\t\022\'\n\ntable_name\030\003 \001(\0132\023.hbase." + + "pb.TableName\022)\n\013region_info\030\004 \003(\0132\024.hbas" + + "e.pb.RegionInfo\022\023\n\013description\030\005 \001(\t\022\035\n\016" + + "is_master_lock\030\006 \001(\010:\005false\"a\n\020WaitingPr" + + "ocedure\022%\n\tlock_type\030\001 \002(\0162\022.hbase.pb.Lo", + "ckType\022&\n\tprocedure\030\002 \002(\0132\023.hbase.pb.Pro" + + "cedure\"\206\002\n\010LockInfo\022-\n\rresource_type\030\001 \002" + + "(\0162\026.hbase.pb.ResourceType\022\025\n\rresource_n" + + "ame\030\002 \001(\t\022%\n\tlock_type\030\003 \002(\0162\022.hbase.pb." + + "LockType\022;\n\036exclusive_lock_owner_procedu" + + "re\030\004 \001(\0132\023.hbase.pb.Procedure\022\031\n\021shared_" + + "lock_count\030\005 \001(\005\0225\n\021waitingProcedures\030\006 " + + "\003(\0132\032.hbase.pb.WaitingProcedure*%\n\010LockT" + + "ype\022\r\n\tEXCLUSIVE\020\001\022\n\n\006SHARED\020\002*x\n\014Resour" + + "ceType\022\030\n\024RESOURCE_TYPE_SERVER\020\001\022\033\n\027RESO", + "URCE_TYPE_NAMESPACE\020\002\022\027\n\023RESOURCE_TYPE_T" + + "ABLE\020\003\022\030\n\024RESOURCE_TYPE_REGION\020\0042\235\001\n\013Loc" + + "kService\022<\n\013RequestLock\022\025.hbase.pb.LockR" + + "equest\032\026.hbase.pb.LockResponse\022P\n\rLockHe" + + "artbeat\022\036.hbase.pb.LockHeartbeatRequest\032" + + "\037.hbase.pb.LockHeartbeatResponseBN\n1org." + + "apache.hadoop.hbase.shaded.protobuf.gene" + + "ratedB\021LockServiceProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { @@ -5290,6 +7543,7 @@ public final class LockServiceProtos { .internalBuildGeneratedFileFrom(descriptorData, new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(), + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor(), }, assigner); internal_static_hbase_pb_LockRequest_descriptor = getDescriptor().getMessageTypes().get(0); @@ -5321,7 +7575,20 @@ public final class LockServiceProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_LockProcedureData_descriptor, new java.lang.String[] { "LockType", "Namespace", "TableName", "RegionInfo", "Description", "IsMasterLock", }); + internal_static_hbase_pb_WaitingProcedure_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_hbase_pb_WaitingProcedure_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_WaitingProcedure_descriptor, + new java.lang.String[] { "LockType", "Procedure", }); + internal_static_hbase_pb_LockInfo_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_hbase_pb_LockInfo_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_LockInfo_descriptor, + new java.lang.String[] { "ResourceType", "ResourceName", "LockType", "ExclusiveLockOwnerProcedure", "SharedLockCount", "WaitingProcedures", }); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); + org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor(); } // 
@@protoc_insertion_point(outer_class_scope) diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java index 8ff19b2d29f..e4ce4cb2005 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java @@ -62144,6 +62144,1133 @@ public final class MasterProtos { } + public interface ListLocksRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.ListLocksRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ListLocksRequest} + */ + public static final class ListLocksRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ListLocksRequest) + ListLocksRequestOrBuilder { + // Use ListLocksRequest.newBuilder() to construct. + private ListLocksRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListLocksRequest() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListLocksRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.Builder.class); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized 
== 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest) obj; + + boolean result = true; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListLocksRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ListLocksRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest(this); + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListLocksRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListLocksRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public ListLocksRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ListLocksRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface ListLocksResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.ListLocksResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + java.util.List + getLockList(); + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getLock(int index); + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + int getLockCount(); + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + java.util.List + getLockOrBuilderList(); + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder getLockOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ListLocksResponse} + */ + public static final class ListLocksResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ListLocksResponse) + ListLocksResponseOrBuilder { + // Use ListLocksResponse.newBuilder() to construct. 
+ private ListLocksResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ListLocksResponse() { + lock_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListLocksResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + lock_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + lock_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + lock_ = java.util.Collections.unmodifiableList(lock_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.Builder.class); + } + + public static final int LOCK_FIELD_NUMBER = 1; + private java.util.List lock_; + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public java.util.List getLockList() { + return lock_; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public java.util.List + getLockOrBuilderList() { + return lock_; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public int getLockCount() { + return lock_.size(); + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getLock(int index) { + return lock_.get(index); + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder getLockOrBuilder( + int index) { + return lock_.get(index); + } + + 
private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getLockCount(); i++) { + if (!getLock(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < lock_.size(); i++) { + output.writeMessage(1, lock_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < lock_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, lock_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse) obj; + + boolean result = true; + result = result && getLockList() + .equals(other.getLockList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getLockCount() > 0) { + hash = (37 * hash) + LOCK_FIELD_NUMBER; + hash = (53 * hash) + getLockList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(java.io.InputStream 
input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListLocksResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ListLocksResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getLockFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (lockBuilder_ == null) { + lock_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + lockBuilder_.clear(); + } + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse(this); + int from_bitField0_ = bitField0_; + if (lockBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + lock_ = java.util.Collections.unmodifiableList(lock_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.lock_ = lock_; + } else { + result.lock_ = lockBuilder_.build(); + } + 
onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance()) return this; + if (lockBuilder_ == null) { + if (!other.lock_.isEmpty()) { + if (lock_.isEmpty()) { + lock_ = other.lock_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureLockIsMutable(); + lock_.addAll(other.lock_); + } + onChanged(); + } + } else { + if (!other.lock_.isEmpty()) { + if (lockBuilder_.isEmpty()) { + lockBuilder_.dispose(); + lockBuilder_ = null; + lock_ = other.lock_; + bitField0_ = (bitField0_ & ~0x00000001); + lockBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getLockFieldBuilder() : null; + } else { + lockBuilder_.addAllMessages(other.lock_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getLockCount(); i++) { + if (!getLock(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List lock_ = + java.util.Collections.emptyList(); + private void ensureLockIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + lock_ = new java.util.ArrayList(lock_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder> lockBuilder_; + + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public java.util.List getLockList() { + if (lockBuilder_ == null) { + return java.util.Collections.unmodifiableList(lock_); + } else { + return lockBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public int getLockCount() { + if (lockBuilder_ == null) { + return lock_.size(); + } else { + return lockBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getLock(int index) { + if (lockBuilder_ == null) { + return lock_.get(index); + } else { + return lockBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder setLock( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo value) { + if (lockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLockIsMutable(); + lock_.set(index, value); + onChanged(); + } else { + lockBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder setLock( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder builderForValue) { + if (lockBuilder_ == null) { + ensureLockIsMutable(); + lock_.set(index, builderForValue.build()); + onChanged(); + } else { + lockBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder addLock(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo value) { + if (lockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLockIsMutable(); + 
lock_.add(value); + onChanged(); + } else { + lockBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder addLock( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo value) { + if (lockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLockIsMutable(); + lock_.add(index, value); + onChanged(); + } else { + lockBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder addLock( + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder builderForValue) { + if (lockBuilder_ == null) { + ensureLockIsMutable(); + lock_.add(builderForValue.build()); + onChanged(); + } else { + lockBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder addLock( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder builderForValue) { + if (lockBuilder_ == null) { + ensureLockIsMutable(); + lock_.add(index, builderForValue.build()); + onChanged(); + } else { + lockBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder addAllLock( + java.lang.Iterable values) { + if (lockBuilder_ == null) { + ensureLockIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, lock_); + onChanged(); + } else { + lockBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder clearLock() { + if (lockBuilder_ == null) { + lock_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + lockBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public Builder removeLock(int index) { + if (lockBuilder_ == null) { + ensureLockIsMutable(); + lock_.remove(index); + onChanged(); + } else { + lockBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder getLockBuilder( + int index) { + return getLockFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder getLockOrBuilder( + int index) { + if (lockBuilder_ == null) { + return lock_.get(index); } else { + return lockBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public java.util.List + getLockOrBuilderList() { + if (lockBuilder_ != null) { + return lockBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(lock_); + } + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder addLockBuilder() { + return getLockFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder addLockBuilder( + int index) { + return getLockFieldBuilder().addBuilder( + index, 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.LockInfo lock = 1; + */ + public java.util.List + getLockBuilderList() { + return getLockFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder> + getLockFieldBuilder() { + if (lockBuilder_ == null) { + lockBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder>( + lock_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + lock_ = null; + } + return lockBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListLocksResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListLocksResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public ListLocksResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ListLocksResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + public interface SetQuotaRequestOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.SetQuotaRequest) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -70840,6 +71967,14 @@ public final class MasterProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + * rpc ListLocks(.hbase.pb.ListLocksRequest) returns (.hbase.pb.ListLocksResponse); + */ + 
+      public abstract void listLocks(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse> done);
+
      /**
       * <pre>
        ** Add a replication peer 
@@ -71445,6 +72580,14 @@ public final class MasterProtos {
           impl.listProcedures(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void listLocks(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse> done) {
+          impl.listLocks(controller, request, done);
+        }
+
         @java.lang.Override
         public  void addReplicationPeer(
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
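For reference, the regenerated LockService descriptor earlier in this patch encodes two new messages, WaitingProcedure (required lock_type, required procedure) and LockInfo (required resource_type, optional resource_name, required lock_type, optional exclusive_lock_owner_procedure, optional shared_lock_count, repeated waitingProcedures), plus a ResourceType enum with SERVER, NAMESPACE, TABLE and REGION values. A minimal sketch of building a LockInfo through the generated API, assuming the setter names implied by the field accessor table (the resource name is a made-up example):

    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;

    // Only resource_type and lock_type are required fields.
    LockServiceProtos.LockInfo tableLock = LockServiceProtos.LockInfo.newBuilder()
        .setResourceType(LockServiceProtos.ResourceType.RESOURCE_TYPE_TABLE)
        .setResourceName("ns:t1") // hypothetical table, illustration only
        .setLockType(LockServiceProtos.LockType.EXCLUSIVE)
        .build();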
@@ -71668,24 +72811,26 @@ public final class MasterProtos {
             case 59:
               return impl.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request);
             case 60:
-              return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request);
+              return impl.listLocks(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest)request);
             case 61:
-              return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request);
+              return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request);
             case 62:
-              return impl.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request);
+              return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request);
             case 63:
-              return impl.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request);
+              return impl.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request);
             case 64:
-              return impl.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request);
+              return impl.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request);
             case 65:
-              return impl.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request);
+              return impl.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request);
             case 66:
-              return impl.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request);
+              return impl.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request);
             case 67:
-              return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
+              return impl.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request);
             case 68:
-              return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
+              return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
             case 69:
+              return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
+            case 70:
               return impl.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
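The renumbering in this switch is the cost of inserting ListLocks at method index 60: every later master RPC (the replication-peer and region-server-draining calls) shifts up by one, and the table must stay in lockstep with the method order in the generated service descriptor. A quick sketch checking that assumption:

    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md =
        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService
            .getDescriptor().getMethods().get(60); // the index dispatched above
    assert "ListLocks".equals(md.getName());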
@@ -71822,24 +72967,26 @@ public final class MasterProtos {
             case 59:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
             case 60:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.getDefaultInstance();
             case 61:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
             case 62:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
             case 63:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
             case 64:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
             case 65:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
             case 66:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
             case 67:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
             case 68:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
             case 69:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+            case 70:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
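getRequestPrototype() mirrors the same renumbering for request defaults. ListLocksRequest declares no fields, so its prototype is an empty message; a minimal sketch:

    MasterProtos.ListLocksRequest req =
        MasterProtos.ListLocksRequest.getDefaultInstance();
    assert req.getSerializedSize() == 0; // no fields, nothing goes on the wire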
@@ -71976,24 +73123,26 @@ public final class MasterProtos {
             case 59:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
             case 60:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance();
             case 61:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
             case 62:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
             case 63:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
             case 64:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
             case 65:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
             case 66:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
             case 67:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
             case 68:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
             case 69:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+            case 70:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
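On the response side, ListLocksResponse wraps the repeated LockInfo field, so a serialize/parse round trip looks roughly like this (reusing the hypothetical tableLock from the earlier sketch, with MasterProtos imported as above):

    MasterProtos.ListLocksResponse resp = MasterProtos.ListLocksResponse.newBuilder()
        .addLock(tableLock)
        .build();
    MasterProtos.ListLocksResponse parsed =
        MasterProtos.ListLocksResponse.parseFrom(resp.toByteArray());
    assert parsed.getLockCount() == 1;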
@@ -72754,6 +73903,14 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request,
        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
 
+    /**
+     * rpc ListLocks(.hbase.pb.ListLocksRequest) returns (.hbase.pb.ListLocksResponse);
+     */
+    public abstract void listLocks(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse> done);
+
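A caller of the non-blocking service goes through this abstract method with a typed callback. A sketch of what a client-side invocation could look like; the service stub and controller are assumed to exist, and channel setup is omitted:

    service.listLocks(controller,
        MasterProtos.ListLocksRequest.getDefaultInstance(),
        new org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<MasterProtos.ListLocksResponse>() {
          @Override
          public void run(MasterProtos.ListLocksResponse response) {
            // e.g. iterate response.getLockList()
          }
        });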
     /**
      * <pre>
      ** Add a replication peer 
@@ -73197,51 +74354,56 @@ public final class MasterProtos {
               done));
           return;
         case 60:
+          this.listLocks(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse>specializeCallback(
+              done));
+          return;
+        case 61:
           this.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request,
            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse>specializeCallback(
               done));
           return;
-        case 61:
+        case 62:
           this.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request,
            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse>specializeCallback(
               done));
           return;
-        case 62:
+        case 63:
           this.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse>specializeCallback(
               done));
           return;
-        case 63:
+        case 64:
           this.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse>specializeCallback(
               done));
           return;
-        case 64:
+        case 65:
           this.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse>specializeCallback(
               done));
           return;
-        case 65:
+        case 66:
           this.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse>specializeCallback(
               done));
           return;
-        case 66:
+        case 67:
           this.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse>specializeCallback(
               done));
           return;
-        case 67:
+        case 68:
           this.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse>specializeCallback(
               done));
           return;
-        case 68:
+        case 69:
           this.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse>specializeCallback(
               done));
           return;
-        case 69:
+        case 70:
           this.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse>specializeCallback(
               done));
@@ -73381,24 +74543,26 @@ public final class MasterProtos {
         case 59:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
         case 60:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.getDefaultInstance();
         case 61:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
         case 62:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
         case 63:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
         case 64:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
         case 65:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
         case 66:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
         case 67:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
         case 68:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
         case 69:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+        case 70:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -73535,24 +74699,26 @@ public final class MasterProtos {
         case 59:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
         case 60:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance();
         case 61:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
         case 62:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
         case 63:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
         case 64:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
         case 65:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
         case 66:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
         case 67:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
         case 68:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
         case 69:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+        case 70:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -74475,12 +75641,27 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()));
       }
 
+      public  void listLocks(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(60),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance()));
+      }
+
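
Driving the new method through the async stub would look roughly like the sketch below. MasterService.newStub, the ListLocks types, and RpcCallback are the generated API; the channel argument (an RpcChannel already bound to the active master), the null controller, and the println callback are illustrative assumptions:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcChannel;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

    class AsyncListLocksSketch {
      // channel: an RpcChannel already connected to the active master.
      static void listLocksAsync(RpcChannel channel) {
        MasterProtos.MasterService.Stub stub =
            MasterProtos.MasterService.newStub(channel);
        stub.listLocks(null, MasterProtos.ListLocksRequest.getDefaultInstance(),
            new RpcCallback<MasterProtos.ListLocksResponse>() {
              @Override
              public void run(MasterProtos.ListLocksResponse response) {
                System.out.println(response); // dump whatever lock list came back
              }
            });
      }
    }
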
       public  void addReplicationPeer(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(60),
+          getDescriptor().getMethods().get(61),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance(),
@@ -74495,7 +75676,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(61),
+          getDescriptor().getMethods().get(62),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance(),
@@ -74510,7 +75691,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(62),
+          getDescriptor().getMethods().get(63),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance(),
@@ -74525,7 +75706,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(63),
+          getDescriptor().getMethods().get(64),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance(),
@@ -74540,7 +75721,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(64),
+          getDescriptor().getMethods().get(65),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance(),
@@ -74555,7 +75736,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(65),
+          getDescriptor().getMethods().get(66),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance(),
@@ -74570,7 +75751,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(66),
+          getDescriptor().getMethods().get(67),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance(),
@@ -74585,7 +75766,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(67),
+          getDescriptor().getMethods().get(68),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance(),
@@ -74600,7 +75781,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(68),
+          getDescriptor().getMethods().get(69),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance(),
@@ -74615,7 +75796,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(69),
+          getDescriptor().getMethods().get(70),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance(),
@@ -74932,6 +76113,11 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse listLocks(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
       public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
@@ -75710,12 +76896,24 @@ public final class MasterProtos {
       }
 
 
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse listLocks(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(60),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance());
+      }
+
+
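
A synchronous caller goes through the blocking stub instead; a minimal sketch, assuming a BlockingRpcChannel to the active master is already in hand (the fetchLocks helper and the null controller are illustrative; the stub factory and types are the generated API):

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel;
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

    class BlockingListLocksSketch {
      // channel: a BlockingRpcChannel already connected to the active master.
      static MasterProtos.ListLocksResponse fetchLocks(BlockingRpcChannel channel)
          throws ServiceException {
        MasterProtos.MasterService.BlockingInterface master =
            MasterProtos.MasterService.newBlockingStub(channel);
        return master.listLocks(null,
            MasterProtos.ListLocksRequest.newBuilder().build());
      }
    }
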
       public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(60),
+          getDescriptor().getMethods().get(61),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance());
@@ -75727,7 +76925,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(61),
+          getDescriptor().getMethods().get(62),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance());
@@ -75739,7 +76937,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(62),
+          getDescriptor().getMethods().get(63),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance());
@@ -75751,7 +76949,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(63),
+          getDescriptor().getMethods().get(64),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance());
@@ -75763,7 +76961,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(64),
+          getDescriptor().getMethods().get(65),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance());
@@ -75775,7 +76973,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(65),
+          getDescriptor().getMethods().get(66),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance());
@@ -75787,7 +76985,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(66),
+          getDescriptor().getMethods().get(67),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance());
@@ -75799,7 +76997,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(67),
+          getDescriptor().getMethods().get(68),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance());
@@ -75811,7 +77009,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(68),
+          getDescriptor().getMethods().get(69),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance());
@@ -75823,7 +77021,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(69),
+          getDescriptor().getMethods().get(70),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance());
@@ -76384,6 +77582,16 @@ public final class MasterProtos {
   private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_ListLocksRequest_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_ListLocksRequest_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_ListLocksResponse_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_ListLocksResponse_fieldAccessorTable;
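
These declarations follow the fixed codegen pattern of one Descriptor plus one GeneratedMessageV3.FieldAccessorTable per message; both are populated by the static initializer further down the file, outside this excerpt. A small runnable check that the new messages really are registered under the hbase.pb package (the wrapper class is illustrative; getDescriptor and findMessageTypeByName are the real protobuf API):

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

    class ListLocksDescriptorCheck {
      public static void main(String[] args) {
        Descriptors.FileDescriptor file = MasterProtos.getDescriptor();
        // Name-based lookup avoids guessing the messages' positions in Master.proto.
        System.out.println(file.findMessageTypeByName("ListLocksRequest").getFullName());
        System.out.println(file.findMessageTypeByName("ListLocksResponse").getFullName());
        // prints: hbase.pb.ListLocksRequest / hbase.pb.ListLocksResponse
      }
    }
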
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_SetQuotaRequest_descriptor;
   private static final 
@@ -76460,383 +77668,387 @@ public final class MasterProtos {
     java.lang.String[] descriptorData = {
       "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014C" +
       "lient.proto\032\023ClusterStatus.proto\032\023ErrorH" +
-      "andling.proto\032\017Procedure.proto\032\013Quota.pr" +
-      "oto\032\021Replication.proto\"\234\001\n\020AddColumnRequ" +
-      "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" +
-      "Name\0225\n\017column_families\030\002 \002(\0132\034.hbase.pb" +
-      ".ColumnFamilySchema\022\026\n\013nonce_group\030\003 \001(\004" +
-      ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"$\n\021AddColumnRespon" +
-      "se\022\017\n\007proc_id\030\001 \001(\004\"}\n\023DeleteColumnReque" +
-      "st\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableN",
-      "ame\022\023\n\013column_name\030\002 \002(\014\022\026\n\013nonce_group\030" +
-      "\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024DeleteColu" +
-      "mnResponse\022\017\n\007proc_id\030\001 \001(\004\"\237\001\n\023ModifyCo" +
-      "lumnRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase." +
-      "pb.TableName\0225\n\017column_families\030\002 \002(\0132\034." +
-      "hbase.pb.ColumnFamilySchema\022\026\n\013nonce_gro" +
-      "up\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024ModifyC" +
-      "olumnResponse\022\017\n\007proc_id\030\001 \001(\004\"n\n\021MoveRe" +
-      "gionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" +
-      "egionSpecifier\022.\n\020dest_server_name\030\002 \001(\013",
-      "2\024.hbase.pb.ServerName\"\024\n\022MoveRegionResp" +
-      "onse\"\210\001\n\030MergeTableRegionsRequest\022)\n\006reg" +
-      "ion\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010" +
-      "forcible\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004 \001" +
-      "(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\",\n\031MergeTableReg" +
-      "ionsResponse\022\017\n\007proc_id\030\001 \001(\004\"@\n\023AssignR" +
-      "egionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." +
-      "RegionSpecifier\"\026\n\024AssignRegionResponse\"" +
-      "X\n\025UnassignRegionRequest\022)\n\006region\030\001 \002(\013" +
-      "2\031.hbase.pb.RegionSpecifier\022\024\n\005force\030\002 \001",
-      "(\010:\005false\"\030\n\026UnassignRegionResponse\"A\n\024O" +
-      "fflineRegionRequest\022)\n\006region\030\001 \002(\0132\031.hb" +
-      "ase.pb.RegionSpecifier\"\027\n\025OfflineRegionR" +
-      "esponse\"\177\n\022CreateTableRequest\022+\n\014table_s" +
-      "chema\030\001 \002(\0132\025.hbase.pb.TableSchema\022\022\n\nsp" +
-      "lit_keys\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" +
-      "\n\005nonce\030\004 \001(\004:\0010\"&\n\023CreateTableResponse\022" +
-      "\017\n\007proc_id\030\001 \001(\004\"g\n\022DeleteTableRequest\022\'" +
-      "\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\022" +
-      "\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\001",
-      "0\"&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001 \001(" +
-      "\004\"\207\001\n\024TruncateTableRequest\022&\n\ttableName\030" +
-      "\001 \002(\0132\023.hbase.pb.TableName\022\035\n\016preserveSp" +
-      "lits\030\002 \001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004:\001" +
-      "0\022\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025TruncateTableResp" +
-      "onse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022EnableTableRequ" +
-      "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" +
-      "Name\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 " +
-      "\001(\004:\0010\"&\n\023EnableTableResponse\022\017\n\007proc_id" +
-      "\030\001 \001(\004\"h\n\023DisableTableRequest\022\'\n\ntable_n",
-      "ame\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonce_" +
-      "group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024Disa" +
-      "bleTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022Mo" +
-      "difyTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.h" +
-      "base.pb.TableName\022+\n\014table_schema\030\002 \002(\0132" +
-      "\025.hbase.pb.TableSchema\022\026\n\013nonce_group\030\003 " +
-      "\001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTableR" +
-      "esponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateNamesp" +
-      "aceRequest\022:\n\023namespaceDescriptor\030\001 \002(\0132" +
-      "\035.hbase.pb.NamespaceDescriptor\022\026\n\013nonce_",
-      "group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Crea" +
-      "teNamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026" +
-      "DeleteNamespaceRequest\022\025\n\rnamespaceName\030" +
-      "\001 \002(\t\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003" +
-      " \001(\004:\0010\"*\n\027DeleteNamespaceResponse\022\017\n\007pr" +
-      "oc_id\030\001 \001(\004\"~\n\026ModifyNamespaceRequest\022:\n" +
-      "\023namespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Na" +
-      "mespaceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\001" +
-      "0\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027ModifyNamespaceRe" +
-      "sponse\022\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespaceD",
-      "escriptorRequest\022\025\n\rnamespaceName\030\001 \002(\t\"" +
-      "\\\n\036GetNamespaceDescriptorResponse\022:\n\023nam" +
-      "espaceDescriptor\030\001 \002(\0132\035.hbase.pb.Namesp" +
-      "aceDescriptor\"!\n\037ListNamespaceDescriptor" +
-      "sRequest\"^\n ListNamespaceDescriptorsResp" +
-      "onse\022:\n\023namespaceDescriptor\030\001 \003(\0132\035.hbas" +
-      "e.pb.NamespaceDescriptor\"?\n&ListTableDes" +
-      "criptorsByNamespaceRequest\022\025\n\rnamespaceN" +
-      "ame\030\001 \002(\t\"U\n\'ListTableDescriptorsByNames" +
-      "paceResponse\022*\n\013tableSchema\030\001 \003(\0132\025.hbas",
-      "e.pb.TableSchema\"9\n ListTableNamesByName" +
-      "spaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n!L" +
-      "istTableNamesByNamespaceResponse\022&\n\ttabl" +
-      "eName\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Shut" +
-      "downRequest\"\022\n\020ShutdownResponse\"\023\n\021StopM" +
-      "asterRequest\"\024\n\022StopMasterResponse\"\034\n\032Is" +
-      "InMaintenanceModeRequest\"8\n\033IsInMaintena" +
-      "nceModeResponse\022\031\n\021inMaintenanceMode\030\001 \002" +
-      "(\010\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n\017B" +
-      "alanceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031",
-      "SetBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n" +
-      "\013synchronous\030\002 \001(\010\"8\n\032SetBalancerRunning" +
-      "Response\022\032\n\022prev_balance_value\030\001 \001(\010\"\032\n\030" +
-      "IsBalancerEnabledRequest\",\n\031IsBalancerEn" +
-      "abledResponse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetSpl" +
-      "itOrMergeEnabledRequest\022\017\n\007enabled\030\001 \002(\010" +
-      "\022\023\n\013synchronous\030\002 \001(\010\0220\n\014switch_types\030\003 " +
-      "\003(\0162\032.hbase.pb.MasterSwitchType\"4\n\036SetSp" +
-      "litOrMergeEnabledResponse\022\022\n\nprev_value\030" +
-      "\001 \003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022/\n",
-      "\013switch_type\030\001 \002(\0162\032.hbase.pb.MasterSwit" +
-      "chType\"0\n\035IsSplitOrMergeEnabledResponse\022" +
-      "\017\n\007enabled\030\001 \002(\010\"\022\n\020NormalizeRequest\"+\n\021" +
-      "NormalizeResponse\022\026\n\016normalizer_ran\030\001 \002(" +
-      "\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on\030\001" +
-      " \002(\010\"=\n\034SetNormalizerRunningResponse\022\035\n\025" +
-      "prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNormali" +
-      "zerEnabledRequest\".\n\033IsNormalizerEnabled" +
-      "Response\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogS" +
-      "canRequest\"-\n\026RunCatalogScanResponse\022\023\n\013",
-      "scan_result\030\001 \001(\005\"-\n\033EnableCatalogJanito" +
-      "rRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatalo" +
-      "gJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036" +
-      "IsCatalogJanitorEnabledRequest\"0\n\037IsCata" +
-      "logJanitorEnabledResponse\022\r\n\005value\030\001 \002(\010" +
-      "\"\030\n\026RunCleanerChoreRequest\"4\n\027RunCleaner" +
-      "ChoreResponse\022\031\n\021cleaner_chore_ran\030\001 \002(\010" +
-      "\"+\n\035SetCleanerChoreRunningRequest\022\n\n\002on\030" +
-      "\001 \002(\010\"4\n\036SetCleanerChoreRunningResponse\022" +
-      "\022\n\nprev_value\030\001 \001(\010\"\036\n\034IsCleanerChoreEna",
-      "bledRequest\".\n\035IsCleanerChoreEnabledResp" +
-      "onse\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotRequest\022/" +
-      "\n\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDesc" +
-      "ription\",\n\020SnapshotResponse\022\030\n\020expected_" +
-      "timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapshotsRe" +
-      "quest\"Q\n\035GetCompletedSnapshotsResponse\0220" +
-      "\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.SnapshotDes" +
-      "cription\"H\n\025DeleteSnapshotRequest\022/\n\010sna" +
+      "andling.proto\032\021LockService.proto\032\017Proced" +
+      "ure.proto\032\013Quota.proto\032\021Replication.prot" +
+      "o\"\234\001\n\020AddColumnRequest\022\'\n\ntable_name\030\001 \002" +
+      "(\0132\023.hbase.pb.TableName\0225\n\017column_famili" +
+      "es\030\002 \002(\0132\034.hbase.pb.ColumnFamilySchema\022\026" +
+      "\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010" +
+      "\"$\n\021AddColumnResponse\022\017\n\007proc_id\030\001 \001(\004\"}" +
+      "\n\023DeleteColumnRequest\022\'\n\ntable_name\030\001 \002(",
+      "\0132\023.hbase.pb.TableName\022\023\n\013column_name\030\002 " +
+      "\002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001" +
+      "(\004:\0010\"\'\n\024DeleteColumnResponse\022\017\n\007proc_id" +
+      "\030\001 \001(\004\"\237\001\n\023ModifyColumnRequest\022\'\n\ntable_" +
+      "name\030\001 \002(\0132\023.hbase.pb.TableName\0225\n\017colum" +
+      "n_families\030\002 \002(\0132\034.hbase.pb.ColumnFamily" +
+      "Schema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030" +
+      "\004 \001(\004:\0010\"\'\n\024ModifyColumnResponse\022\017\n\007proc" +
+      "_id\030\001 \001(\004\"n\n\021MoveRegionRequest\022)\n\006region" +
+      "\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\020des",
+      "t_server_name\030\002 \001(\0132\024.hbase.pb.ServerNam" +
+      "e\"\024\n\022MoveRegionResponse\"\210\001\n\030MergeTableRe" +
+      "gionsRequest\022)\n\006region\030\001 \003(\0132\031.hbase.pb." +
+      "RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005false" +
+      "\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:" +
+      "\0010\",\n\031MergeTableRegionsResponse\022\017\n\007proc_" +
+      "id\030\001 \001(\004\"@\n\023AssignRegionRequest\022)\n\006regio" +
+      "n\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"\026\n\024As" +
+      "signRegionResponse\"X\n\025UnassignRegionRequ" +
+      "est\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpe",
+      "cifier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026Unassign" +
+      "RegionResponse\"A\n\024OfflineRegionRequest\022)" +
+      "\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifie" +
+      "r\"\027\n\025OfflineRegionResponse\"\177\n\022CreateTabl" +
+      "eRequest\022+\n\014table_schema\030\001 \002(\0132\025.hbase.p" +
+      "b.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\022\026\n\013non" +
+      "ce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023C" +
+      "reateTableResponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022D" +
+      "eleteTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023." +
+      "hbase.pb.TableName\022\026\n\013nonce_group\030\002 \001(\004:",
+      "\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023DeleteTableRespo" +
+      "nse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024TruncateTableRe" +
+      "quest\022&\n\ttableName\030\001 \002(\0132\023.hbase.pb.Tabl" +
+      "eName\022\035\n\016preserveSplits\030\002 \001(\010:\005false\022\026\n\013" +
+      "nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"(" +
+      "\n\025TruncateTableResponse\022\017\n\007proc_id\030\001 \001(\004" +
+      "\"g\n\022EnableTableRequest\022\'\n\ntable_name\030\001 \002" +
+      "(\0132\023.hbase.pb.TableName\022\026\n\013nonce_group\030\002" +
+      " \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023EnableTable" +
+      "Response\022\017\n\007proc_id\030\001 \001(\004\"h\n\023DisableTabl",
+      "eRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb." +
+      "TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005non" +
+      "ce\030\003 \001(\004:\0010\"\'\n\024DisableTableResponse\022\017\n\007p" +
+      "roc_id\030\001 \001(\004\"\224\001\n\022ModifyTableRequest\022\'\n\nt" +
+      "able_name\030\001 \002(\0132\023.hbase.pb.TableName\022+\n\014" +
+      "table_schema\030\002 \002(\0132\025.hbase.pb.TableSchem" +
+      "a\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004" +
+      ":\0010\"&\n\023ModifyTableResponse\022\017\n\007proc_id\030\001 " +
+      "\001(\004\"~\n\026CreateNamespaceRequest\022:\n\023namespa" +
+      "ceDescriptor\030\001 \002(\0132\035.hbase.pb.NamespaceD",
+      "escriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005non" +
+      "ce\030\003 \001(\004:\0010\"*\n\027CreateNamespaceResponse\022\017" +
+      "\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteNamespaceReques" +
+      "t\022\025\n\rnamespaceName\030\001 \002(\t\022\026\n\013nonce_group\030" +
+      "\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027DeleteName" +
+      "spaceResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026Modify" +
+      "NamespaceRequest\022:\n\023namespaceDescriptor\030" +
+      "\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\022\026\n\013" +
+      "nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*" +
+      "\n\027ModifyNamespaceResponse\022\017\n\007proc_id\030\001 \001",
+      "(\004\"6\n\035GetNamespaceDescriptorRequest\022\025\n\rn" +
+      "amespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDescri" +
+      "ptorResponse\022:\n\023namespaceDescriptor\030\001 \002(" +
+      "\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037List" +
+      "NamespaceDescriptorsRequest\"^\n ListNames" +
+      "paceDescriptorsResponse\022:\n\023namespaceDesc" +
+      "riptor\030\001 \003(\0132\035.hbase.pb.NamespaceDescrip" +
+      "tor\"?\n&ListTableDescriptorsByNamespaceRe" +
+      "quest\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'ListTabl" +
+      "eDescriptorsByNamespaceResponse\022*\n\013table",
+      "Schema\030\001 \003(\0132\025.hbase.pb.TableSchema\"9\n L" +
+      "istTableNamesByNamespaceRequest\022\025\n\rnames" +
+      "paceName\030\001 \002(\t\"K\n!ListTableNamesByNamesp" +
+      "aceResponse\022&\n\ttableName\030\001 \003(\0132\023.hbase.p" +
+      "b.TableName\"\021\n\017ShutdownRequest\"\022\n\020Shutdo" +
+      "wnResponse\"\023\n\021StopMasterRequest\"\024\n\022StopM" +
+      "asterResponse\"\034\n\032IsInMaintenanceModeRequ" +
+      "est\"8\n\033IsInMaintenanceModeResponse\022\031\n\021in" +
+      "MaintenanceMode\030\001 \002(\010\"\037\n\016BalanceRequest\022" +
+      "\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n\014bal",
+      "ancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunningReq" +
+      "uest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n" +
+      "\032SetBalancerRunningResponse\022\032\n\022prev_bala" +
+      "nce_value\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequ" +
+      "est\",\n\031IsBalancerEnabledResponse\022\017\n\007enab" +
+      "led\030\001 \002(\010\"w\n\035SetSplitOrMergeEnabledReque" +
+      "st\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010" +
+      "\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb.Master" +
+      "SwitchType\"4\n\036SetSplitOrMergeEnabledResp" +
+      "onse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSplitOrMer",
+      "geEnabledRequest\022/\n\013switch_type\030\001 \002(\0162\032." +
+      "hbase.pb.MasterSwitchType\"0\n\035IsSplitOrMe" +
+      "rgeEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\022\n\020N" +
+      "ormalizeRequest\"+\n\021NormalizeResponse\022\026\n\016" +
+      "normalizer_ran\030\001 \002(\010\")\n\033SetNormalizerRun" +
+      "ningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormalizer" +
+      "RunningResponse\022\035\n\025prev_normalizer_value" +
+      "\030\001 \001(\010\"\034\n\032IsNormalizerEnabledRequest\".\n\033" +
+      "IsNormalizerEnabledResponse\022\017\n\007enabled\030\001" +
+      " \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026RunCata",
+      "logScanResponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033" +
+      "EnableCatalogJanitorRequest\022\016\n\006enable\030\001 " +
+      "\002(\010\"2\n\034EnableCatalogJanitorResponse\022\022\n\np" +
+      "rev_value\030\001 \001(\010\" \n\036IsCatalogJanitorEnabl" +
+      "edRequest\"0\n\037IsCatalogJanitorEnabledResp" +
+      "onse\022\r\n\005value\030\001 \002(\010\"\030\n\026RunCleanerChoreRe" +
+      "quest\"4\n\027RunCleanerChoreResponse\022\031\n\021clea" +
+      "ner_chore_ran\030\001 \002(\010\"+\n\035SetCleanerChoreRu" +
+      "nningRequest\022\n\n\002on\030\001 \002(\010\"4\n\036SetCleanerCh" +
+      "oreRunningResponse\022\022\n\nprev_value\030\001 \001(\010\"\036",
+      "\n\034IsCleanerChoreEnabledRequest\".\n\035IsClea" +
+      "nerChoreEnabledResponse\022\r\n\005value\030\001 \002(\010\"B" +
+      "\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hb" +
+      "ase.pb.SnapshotDescription\",\n\020SnapshotRe" +
+      "sponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034GetC" +
+      "ompletedSnapshotsRequest\"Q\n\035GetCompleted" +
+      "SnapshotsResponse\0220\n\tsnapshots\030\001 \003(\0132\035.h" +
+      "base.pb.SnapshotDescription\"H\n\025DeleteSna" +
+      "pshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.p" +
+      "b.SnapshotDescription\"\030\n\026DeleteSnapshotR",
+      "esponse\"s\n\026RestoreSnapshotRequest\022/\n\010sna" +
       "pshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescripti" +
-      "on\"\030\n\026DeleteSnapshotResponse\"s\n\026RestoreS",
-      "napshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase" +
-      ".pb.SnapshotDescription\022\026\n\013nonce_group\030\002" +
-      " \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027RestoreSnap" +
-      "shotResponse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsSnaps" +
-      "hotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase" +
-      ".pb.SnapshotDescription\"^\n\026IsSnapshotDon" +
-      "eResponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010snapsh" +
-      "ot\030\002 \001(\0132\035.hbase.pb.SnapshotDescription\"" +
-      "O\n\034IsRestoreSnapshotDoneRequest\022/\n\010snaps" +
-      "hot\030\001 \001(\0132\035.hbase.pb.SnapshotDescription",
-      "\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n\004don" +
-      "e\030\001 \001(\010:\005false\"F\n\033GetSchemaAlterStatusRe" +
-      "quest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Tab" +
-      "leName\"T\n\034GetSchemaAlterStatusResponse\022\035" +
-      "\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtotal_r" +
-      "egions\030\002 \001(\r\"\213\001\n\032GetTableDescriptorsRequ" +
-      "est\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.Tabl" +
-      "eName\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_tabl" +
-      "es\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J\n\033Ge" +
-      "tTableDescriptorsResponse\022+\n\014table_schem",
-      "a\030\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024GetTab" +
-      "leNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022include" +
-      "_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 " +
-      "\001(\t\"A\n\025GetTableNamesResponse\022(\n\013table_na" +
-      "mes\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024GetTab" +
-      "leStateRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hba" +
-      "se.pb.TableName\"B\n\025GetTableStateResponse" +
-      "\022)\n\013table_state\030\001 \002(\0132\024.hbase.pb.TableSt" +
-      "ate\"\031\n\027GetClusterStatusRequest\"K\n\030GetClu" +
-      "sterStatusResponse\022/\n\016cluster_status\030\001 \002",
-      "(\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMasterR" +
-      "unningRequest\"4\n\027IsMasterRunningResponse" +
-      "\022\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecProce" +
-      "dureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hbase.p" +
-      "b.ProcedureDescription\"F\n\025ExecProcedureR" +
-      "esponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013ret" +
-      "urn_data\030\002 \001(\014\"K\n\026IsProcedureDoneRequest" +
-      "\0221\n\tprocedure\030\001 \001(\0132\036.hbase.pb.Procedure" +
-      "Description\"`\n\027IsProcedureDoneResponse\022\023" +
-      "\n\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.",
-      "hbase.pb.ProcedureDescription\",\n\031GetProc" +
-      "edureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"\375\001\n\032" +
-      "GetProcedureResultResponse\0229\n\005state\030\001 \002(" +
-      "\0162*.hbase.pb.GetProcedureResultResponse." +
-      "State\022\026\n\016submitted_time\030\002 \001(\004\022\023\n\013last_up" +
-      "date\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030" +
-      "\005 \001(\0132!.hbase.pb.ForeignExceptionMessage" +
-      "\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n" +
-      "\010FINISHED\020\002\"M\n\025AbortProcedureRequest\022\017\n\007" +
-      "proc_id\030\001 \002(\004\022#\n\025mayInterruptIfRunning\030\002",
-      " \001(\010:\004true\"6\n\026AbortProcedureResponse\022\034\n\024" +
-      "is_procedure_aborted\030\001 \002(\010\"\027\n\025ListProced" +
-      "uresRequest\"@\n\026ListProceduresResponse\022&\n" +
-      "\tprocedure\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001" +
-      "\n\017SetQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\n" +
-      "user_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\nt" +
-      "able_name\030\004 \001(\0132\023.hbase.pb.TableName\022\022\n\n" +
-      "remove_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010" +
-      "\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.ThrottleRe" +
-      "quest\"\022\n\020SetQuotaResponse\"J\n\037MajorCompac",
-      "tionTimestampRequest\022\'\n\ntable_name\030\001 \002(\013" +
-      "2\023.hbase.pb.TableName\"U\n(MajorCompaction" +
-      "TimestampForRegionRequest\022)\n\006region\030\001 \002(" +
-      "\0132\031.hbase.pb.RegionSpecifier\"@\n MajorCom" +
-      "pactionTimestampResponse\022\034\n\024compaction_t" +
-      "imestamp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRe" +
-      "quest\"\354\001\n\034SecurityCapabilitiesResponse\022G" +
-      "\n\014capabilities\030\001 \003(\01621.hbase.pb.Security" +
-      "CapabilitiesResponse.Capability\"\202\001\n\nCapa" +
-      "bility\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SEC",
-      "URE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022" +
-      "\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILI" +
-      "TY\020\004\"\"\n ListDrainingRegionServersRequest" +
-      "\"N\n!ListDrainingRegionServersResponse\022)\n" +
-      "\013server_name\030\001 \003(\0132\024.hbase.pb.ServerName" +
-      "\"F\n\031DrainRegionServersRequest\022)\n\013server_" +
-      "name\030\001 \003(\0132\024.hbase.pb.ServerName\"\034\n\032Drai" +
-      "nRegionServersResponse\"P\n#RemoveDrainFro" +
-      "mRegionServersRequest\022)\n\013server_name\030\001 \003" +
-      "(\0132\024.hbase.pb.ServerName\"&\n$RemoveDrainF",
-      "romRegionServersResponse*(\n\020MasterSwitch" +
-      "Type\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\3013\n\rMasterSer" +
-      "vice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb." +
-      "GetSchemaAlterStatusRequest\032&.hbase.pb.G" +
-      "etSchemaAlterStatusResponse\022b\n\023GetTableD" +
-      "escriptors\022$.hbase.pb.GetTableDescriptor" +
-      "sRequest\032%.hbase.pb.GetTableDescriptorsR" +
-      "esponse\022P\n\rGetTableNames\022\036.hbase.pb.GetT" +
-      "ableNamesRequest\032\037.hbase.pb.GetTableName" +
-      "sResponse\022Y\n\020GetClusterStatus\022!.hbase.pb",
-      ".GetClusterStatusRequest\032\".hbase.pb.GetC" +
-      "lusterStatusResponse\022V\n\017IsMasterRunning\022" +
-      " .hbase.pb.IsMasterRunningRequest\032!.hbas" +
-      "e.pb.IsMasterRunningResponse\022D\n\tAddColum" +
-      "n\022\032.hbase.pb.AddColumnRequest\032\033.hbase.pb" +
-      ".AddColumnResponse\022M\n\014DeleteColumn\022\035.hba" +
-      "se.pb.DeleteColumnRequest\032\036.hbase.pb.Del" +
-      "eteColumnResponse\022M\n\014ModifyColumn\022\035.hbas" +
-      "e.pb.ModifyColumnRequest\032\036.hbase.pb.Modi" +
-      "fyColumnResponse\022G\n\nMoveRegion\022\033.hbase.p",
-      "b.MoveRegionRequest\032\034.hbase.pb.MoveRegio" +
-      "nResponse\022\\\n\021MergeTableRegions\022\".hbase.p" +
-      "b.MergeTableRegionsRequest\032#.hbase.pb.Me" +
-      "rgeTableRegionsResponse\022M\n\014AssignRegion\022" +
-      "\035.hbase.pb.AssignRegionRequest\032\036.hbase.p" +
-      "b.AssignRegionResponse\022S\n\016UnassignRegion" +
-      "\022\037.hbase.pb.UnassignRegionRequest\032 .hbas" +
-      "e.pb.UnassignRegionResponse\022P\n\rOfflineRe" +
-      "gion\022\036.hbase.pb.OfflineRegionRequest\032\037.h" +
-      "base.pb.OfflineRegionResponse\022J\n\013DeleteT",
-      "able\022\034.hbase.pb.DeleteTableRequest\032\035.hba" +
-      "se.pb.DeleteTableResponse\022P\n\rtruncateTab" +
-      "le\022\036.hbase.pb.TruncateTableRequest\032\037.hba" +
-      "se.pb.TruncateTableResponse\022J\n\013EnableTab" +
-      "le\022\034.hbase.pb.EnableTableRequest\032\035.hbase" +
-      ".pb.EnableTableResponse\022M\n\014DisableTable\022" +
-      "\035.hbase.pb.DisableTableRequest\032\036.hbase.p" +
-      "b.DisableTableResponse\022J\n\013ModifyTable\022\034." +
-      "hbase.pb.ModifyTableRequest\032\035.hbase.pb.M" +
-      "odifyTableResponse\022J\n\013CreateTable\022\034.hbas",
-      "e.pb.CreateTableRequest\032\035.hbase.pb.Creat" +
-      "eTableResponse\022A\n\010Shutdown\022\031.hbase.pb.Sh" +
-      "utdownRequest\032\032.hbase.pb.ShutdownRespons" +
-      "e\022G\n\nStopMaster\022\033.hbase.pb.StopMasterReq" +
-      "uest\032\034.hbase.pb.StopMasterResponse\022h\n\031Is" +
-      "MasterInMaintenanceMode\022$.hbase.pb.IsInM" +
-      "aintenanceModeRequest\032%.hbase.pb.IsInMai" +
-      "ntenanceModeResponse\022>\n\007Balance\022\030.hbase." +
-      "pb.BalanceRequest\032\031.hbase.pb.BalanceResp" +
-      "onse\022_\n\022SetBalancerRunning\022#.hbase.pb.Se",
-      "tBalancerRunningRequest\032$.hbase.pb.SetBa" +
-      "lancerRunningResponse\022\\\n\021IsBalancerEnabl" +
-      "ed\022\".hbase.pb.IsBalancerEnabledRequest\032#" +
-      ".hbase.pb.IsBalancerEnabledResponse\022k\n\026S" +
-      "etSplitOrMergeEnabled\022\'.hbase.pb.SetSpli" +
-      "tOrMergeEnabledRequest\032(.hbase.pb.SetSpl" +
-      "itOrMergeEnabledResponse\022h\n\025IsSplitOrMer" +
-      "geEnabled\022&.hbase.pb.IsSplitOrMergeEnabl" +
-      "edRequest\032\'.hbase.pb.IsSplitOrMergeEnabl" +
-      "edResponse\022D\n\tNormalize\022\032.hbase.pb.Norma",
-      "lizeRequest\032\033.hbase.pb.NormalizeResponse" +
-      "\022e\n\024SetNormalizerRunning\022%.hbase.pb.SetN" +
-      "ormalizerRunningRequest\032&.hbase.pb.SetNo" +
-      "rmalizerRunningResponse\022b\n\023IsNormalizerE" +
-      "nabled\022$.hbase.pb.IsNormalizerEnabledReq" +
-      "uest\032%.hbase.pb.IsNormalizerEnabledRespo" +
-      "nse\022S\n\016RunCatalogScan\022\037.hbase.pb.RunCata" +
-      "logScanRequest\032 .hbase.pb.RunCatalogScan" +
-      "Response\022e\n\024EnableCatalogJanitor\022%.hbase" +
-      ".pb.EnableCatalogJanitorRequest\032&.hbase.",
-      "pb.EnableCatalogJanitorResponse\022n\n\027IsCat" +
-      "alogJanitorEnabled\022(.hbase.pb.IsCatalogJ" +
-      "anitorEnabledRequest\032).hbase.pb.IsCatalo" +
-      "gJanitorEnabledResponse\022V\n\017RunCleanerCho" +
-      "re\022 .hbase.pb.RunCleanerChoreRequest\032!.h" +
-      "base.pb.RunCleanerChoreResponse\022k\n\026SetCl" +
-      "eanerChoreRunning\022\'.hbase.pb.SetCleanerC" +
-      "horeRunningRequest\032(.hbase.pb.SetCleaner" +
-      "ChoreRunningResponse\022h\n\025IsCleanerChoreEn" +
-      "abled\022&.hbase.pb.IsCleanerChoreEnabledRe",
-      "quest\032\'.hbase.pb.IsCleanerChoreEnabledRe" +
-      "sponse\022^\n\021ExecMasterService\022#.hbase.pb.C" +
-      "oprocessorServiceRequest\032$.hbase.pb.Copr" +
-      "ocessorServiceResponse\022A\n\010Snapshot\022\031.hba" +
-      "se.pb.SnapshotRequest\032\032.hbase.pb.Snapsho" +
-      "tResponse\022h\n\025GetCompletedSnapshots\022&.hba" +
-      "se.pb.GetCompletedSnapshotsRequest\032\'.hba" +
-      "se.pb.GetCompletedSnapshotsResponse\022S\n\016D" +
-      "eleteSnapshot\022\037.hbase.pb.DeleteSnapshotR" +
-      "equest\032 .hbase.pb.DeleteSnapshotResponse",
-      "\022S\n\016IsSnapshotDone\022\037.hbase.pb.IsSnapshot" +
-      "DoneRequest\032 .hbase.pb.IsSnapshotDoneRes" +
-      "ponse\022V\n\017RestoreSnapshot\022 .hbase.pb.Rest" +
-      "oreSnapshotRequest\032!.hbase.pb.RestoreSna" +
-      "pshotResponse\022P\n\rExecProcedure\022\036.hbase.p" +
-      "b.ExecProcedureRequest\032\037.hbase.pb.ExecPr" +
-      "ocedureResponse\022W\n\024ExecProcedureWithRet\022" +
-      "\036.hbase.pb.ExecProcedureRequest\032\037.hbase." +
-      "pb.ExecProcedureResponse\022V\n\017IsProcedureD" +
-      "one\022 .hbase.pb.IsProcedureDoneRequest\032!.",
-      "hbase.pb.IsProcedureDoneResponse\022V\n\017Modi" +
-      "fyNamespace\022 .hbase.pb.ModifyNamespaceRe" +
-      "quest\032!.hbase.pb.ModifyNamespaceResponse" +
-      "\022V\n\017CreateNamespace\022 .hbase.pb.CreateNam" +
-      "espaceRequest\032!.hbase.pb.CreateNamespace" +
-      "Response\022V\n\017DeleteNamespace\022 .hbase.pb.D" +
-      "eleteNamespaceRequest\032!.hbase.pb.DeleteN" +
-      "amespaceResponse\022k\n\026GetNamespaceDescript" +
-      "or\022\'.hbase.pb.GetNamespaceDescriptorRequ" +
-      "est\032(.hbase.pb.GetNamespaceDescriptorRes",
-      "ponse\022q\n\030ListNamespaceDescriptors\022).hbas" +
-      "e.pb.ListNamespaceDescriptorsRequest\032*.h" +
-      "base.pb.ListNamespaceDescriptorsResponse" +
-      "\022\206\001\n\037ListTableDescriptorsByNamespace\0220.h" +
-      "base.pb.ListTableDescriptorsByNamespaceR" +
-      "equest\0321.hbase.pb.ListTableDescriptorsBy" +
-      "NamespaceResponse\022t\n\031ListTableNamesByNam" +
-      "espace\022*.hbase.pb.ListTableNamesByNamesp" +
-      "aceRequest\032+.hbase.pb.ListTableNamesByNa" +
-      "mespaceResponse\022P\n\rGetTableState\022\036.hbase",
-      ".pb.GetTableStateRequest\032\037.hbase.pb.GetT" +
-      "ableStateResponse\022A\n\010SetQuota\022\031.hbase.pb" +
-      ".SetQuotaRequest\032\032.hbase.pb.SetQuotaResp" +
-      "onse\022x\n\037getLastMajorCompactionTimestamp\022" +
-      ").hbase.pb.MajorCompactionTimestampReque" +
-      "st\032*.hbase.pb.MajorCompactionTimestampRe" +
-      "sponse\022\212\001\n(getLastMajorCompactionTimesta" +
-      "mpForRegion\0222.hbase.pb.MajorCompactionTi" +
-      "mestampForRegionRequest\032*.hbase.pb.Major" +
-      "CompactionTimestampResponse\022_\n\022getProced",
-      "ureResult\022#.hbase.pb.GetProcedureResultR" +
-      "equest\032$.hbase.pb.GetProcedureResultResp" +
-      "onse\022h\n\027getSecurityCapabilities\022%.hbase." +
-      "pb.SecurityCapabilitiesRequest\032&.hbase.p" +
-      "b.SecurityCapabilitiesResponse\022S\n\016AbortP" +
-      "rocedure\022\037.hbase.pb.AbortProcedureReques" +
-      "t\032 .hbase.pb.AbortProcedureResponse\022S\n\016L" +
-      "istProcedures\022\037.hbase.pb.ListProceduresR" +
-      "equest\032 .hbase.pb.ListProceduresResponse" +
-      "\022_\n\022AddReplicationPeer\022#.hbase.pb.AddRep",
-      "licationPeerRequest\032$.hbase.pb.AddReplic" +
-      "ationPeerResponse\022h\n\025RemoveReplicationPe" +
-      "er\022&.hbase.pb.RemoveReplicationPeerReque" +
-      "st\032\'.hbase.pb.RemoveReplicationPeerRespo" +
-      "nse\022h\n\025EnableReplicationPeer\022&.hbase.pb." +
-      "EnableReplicationPeerRequest\032\'.hbase.pb." +
-      "EnableReplicationPeerResponse\022k\n\026Disable" +
-      "ReplicationPeer\022\'.hbase.pb.DisableReplic" +
-      "ationPeerRequest\032(.hbase.pb.DisableRepli" +
-      "cationPeerResponse\022q\n\030GetReplicationPeer",
-      "Config\022).hbase.pb.GetReplicationPeerConf" +
-      "igRequest\032*.hbase.pb.GetReplicationPeerC" +
-      "onfigResponse\022z\n\033UpdateReplicationPeerCo" +
-      "nfig\022,.hbase.pb.UpdateReplicationPeerCon" +
-      "figRequest\032-.hbase.pb.UpdateReplicationP" +
-      "eerConfigResponse\022e\n\024ListReplicationPeer" +
-      "s\022%.hbase.pb.ListReplicationPeersRequest" +
-      "\032&.hbase.pb.ListReplicationPeersResponse" +
-      "\022t\n\031listDrainingRegionServers\022*.hbase.pb" +
-      ".ListDrainingRegionServersRequest\032+.hbas",
-      "e.pb.ListDrainingRegionServersResponse\022_" +
-      "\n\022drainRegionServers\022#.hbase.pb.DrainReg" +
-      "ionServersRequest\032$.hbase.pb.DrainRegion" +
-      "ServersResponse\022}\n\034removeDrainFromRegion" +
-      "Servers\022-.hbase.pb.RemoveDrainFromRegion" +
-      "ServersRequest\032..hbase.pb.RemoveDrainFro" +
-      "mRegionServersResponseBI\n1org.apache.had" +
-      "oop.hbase.shaded.protobuf.generatedB\014Mas" +
-      "terProtosH\001\210\001\001\240\001\001"
+      "on\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(" +
+      "\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007proc_" +
+      "id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n\010sna" +
+      "pshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescripti" +
+      "on\"^\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001" +
+      "(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.S" +
+      "napshotDescription\"O\n\034IsRestoreSnapshotD" +
+      "oneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.",
+      "SnapshotDescription\"4\n\035IsRestoreSnapshot" +
+      "DoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033Get" +
+      "SchemaAlterStatusRequest\022\'\n\ntable_name\030\001" +
+      " \002(\0132\023.hbase.pb.TableName\"T\n\034GetSchemaAl" +
+      "terStatusResponse\022\035\n\025yet_to_update_regio" +
+      "ns\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetT" +
+      "ableDescriptorsRequest\022(\n\013table_names\030\001 " +
+      "\003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022" +
+      "!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021\n\tna" +
+      "mespace\030\004 \001(\t\"J\n\033GetTableDescriptorsResp",
+      "onse\022+\n\014table_schema\030\001 \003(\0132\025.hbase.pb.Ta" +
+      "bleSchema\"[\n\024GetTableNamesRequest\022\r\n\005reg" +
+      "ex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005fa" +
+      "lse\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesR" +
+      "esponse\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb." +
+      "TableName\"?\n\024GetTableStateRequest\022\'\n\ntab" +
+      "le_name\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025Ge" +
+      "tTableStateResponse\022)\n\013table_state\030\001 \002(\013" +
+      "2\024.hbase.pb.TableState\"\031\n\027GetClusterStat" +
+      "usRequest\"K\n\030GetClusterStatusResponse\022/\n",
+      "\016cluster_status\030\001 \002(\0132\027.hbase.pb.Cluster" +
+      "Status\"\030\n\026IsMasterRunningRequest\"4\n\027IsMa" +
+      "sterRunningResponse\022\031\n\021is_master_running" +
+      "\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tproced" +
+      "ure\030\001 \002(\0132\036.hbase.pb.ProcedureDescriptio" +
+      "n\"F\n\025ExecProcedureResponse\022\030\n\020expected_t" +
+      "imeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsP" +
+      "rocedureDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036" +
+      ".hbase.pb.ProcedureDescription\"`\n\027IsProc" +
+      "edureDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220",
+      "\n\010snapshot\030\002 \001(\0132\036.hbase.pb.ProcedureDes" +
+      "cription\",\n\031GetProcedureResultRequest\022\017\n" +
+      "\007proc_id\030\001 \002(\004\"\375\001\n\032GetProcedureResultRes" +
+      "ponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetProce" +
+      "dureResultResponse.State\022\026\n\016submitted_ti" +
+      "me\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030" +
+      "\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Fore" +
+      "ignExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUN" +
+      "D\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortP" +
+      "rocedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayI",
+      "nterruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortPr" +
+      "ocedureResponse\022\034\n\024is_procedure_aborted\030" +
+      "\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListPr" +
+      "oceduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hb" +
+      "ase.pb.Procedure\"\022\n\020ListLocksRequest\"5\n\021" +
+      "ListLocksResponse\022 \n\004lock\030\001 \003(\0132\022.hbase." +
+      "pb.LockInfo\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_" +
+      "name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamesp" +
+      "ace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb" +
+      ".TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass",
+      "_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase" +
+      ".pb.ThrottleRequest\"\022\n\020SetQuotaResponse\"" +
+      "J\n\037MajorCompactionTimestampRequest\022\'\n\nta" +
+      "ble_name\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(M" +
+      "ajorCompactionTimestampForRegionRequest\022" +
+      ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" +
+      "er\"@\n MajorCompactionTimestampResponse\022\034" +
+      "\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Security" +
+      "CapabilitiesRequest\"\354\001\n\034SecurityCapabili" +
+      "tiesResponse\022G\n\014capabilities\030\001 \003(\01621.hba",
+      "se.pb.SecurityCapabilitiesResponse.Capab" +
+      "ility\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTIC" +
+      "ATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAU" +
+      "THORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n" +
+      "\017CELL_VISIBILITY\020\004\"\"\n ListDrainingRegion" +
+      "ServersRequest\"N\n!ListDrainingRegionServ" +
+      "ersResponse\022)\n\013server_name\030\001 \003(\0132\024.hbase" +
+      ".pb.ServerName\"F\n\031DrainRegionServersRequ" +
+      "est\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.Serv" +
+      "erName\"\034\n\032DrainRegionServersResponse\"P\n#",
+      "RemoveDrainFromRegionServersRequest\022)\n\013s" +
+      "erver_name\030\001 \003(\0132\024.hbase.pb.ServerName\"&" +
+      "\n$RemoveDrainFromRegionServersResponse*(" +
+      "\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\001" +
+      "2\2074\n\rMasterService\022e\n\024GetSchemaAlterStat" +
+      "us\022%.hbase.pb.GetSchemaAlterStatusReques" +
+      "t\032&.hbase.pb.GetSchemaAlterStatusRespons" +
+      "e\022b\n\023GetTableDescriptors\022$.hbase.pb.GetT" +
+      "ableDescriptorsRequest\032%.hbase.pb.GetTab" +
+      "leDescriptorsResponse\022P\n\rGetTableNames\022\036",
+      ".hbase.pb.GetTableNamesRequest\032\037.hbase.p" +
+      "b.GetTableNamesResponse\022Y\n\020GetClusterSta" +
+      "tus\022!.hbase.pb.GetClusterStatusRequest\032\"" +
+      ".hbase.pb.GetClusterStatusResponse\022V\n\017Is" +
+      "MasterRunning\022 .hbase.pb.IsMasterRunning" +
+      "Request\032!.hbase.pb.IsMasterRunningRespon" +
+      "se\022D\n\tAddColumn\022\032.hbase.pb.AddColumnRequ" +
+      "est\032\033.hbase.pb.AddColumnResponse\022M\n\014Dele" +
+      "teColumn\022\035.hbase.pb.DeleteColumnRequest\032" +
+      "\036.hbase.pb.DeleteColumnResponse\022M\n\014Modif",
+      "yColumn\022\035.hbase.pb.ModifyColumnRequest\032\036" +
+      ".hbase.pb.ModifyColumnResponse\022G\n\nMoveRe" +
+      "gion\022\033.hbase.pb.MoveRegionRequest\032\034.hbas" +
+      "e.pb.MoveRegionResponse\022\\\n\021MergeTableReg" +
+      "ions\022\".hbase.pb.MergeTableRegionsRequest" +
+      "\032#.hbase.pb.MergeTableRegionsResponse\022M\n" +
+      "\014AssignRegion\022\035.hbase.pb.AssignRegionReq" +
+      "uest\032\036.hbase.pb.AssignRegionResponse\022S\n\016" +
+      "UnassignRegion\022\037.hbase.pb.UnassignRegion" +
+      "Request\032 .hbase.pb.UnassignRegionRespons",
+      "e\022P\n\rOfflineRegion\022\036.hbase.pb.OfflineReg" +
+      "ionRequest\032\037.hbase.pb.OfflineRegionRespo" +
+      "nse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteTabl" +
+      "eRequest\032\035.hbase.pb.DeleteTableResponse\022" +
+      "P\n\rtruncateTable\022\036.hbase.pb.TruncateTabl" +
+      "eRequest\032\037.hbase.pb.TruncateTableRespons" +
+      "e\022J\n\013EnableTable\022\034.hbase.pb.EnableTableR" +
+      "equest\032\035.hbase.pb.EnableTableResponse\022M\n" +
+      "\014DisableTable\022\035.hbase.pb.DisableTableReq" +
+      "uest\032\036.hbase.pb.DisableTableResponse\022J\n\013",
+      "ModifyTable\022\034.hbase.pb.ModifyTableReques" +
+      "t\032\035.hbase.pb.ModifyTableResponse\022J\n\013Crea" +
+      "teTable\022\034.hbase.pb.CreateTableRequest\032\035." +
+      "hbase.pb.CreateTableResponse\022A\n\010Shutdown" +
+      "\022\031.hbase.pb.ShutdownRequest\032\032.hbase.pb.S" +
+      "hutdownResponse\022G\n\nStopMaster\022\033.hbase.pb" +
+      ".StopMasterRequest\032\034.hbase.pb.StopMaster" +
+      "Response\022h\n\031IsMasterInMaintenanceMode\022$." +
+      "hbase.pb.IsInMaintenanceModeRequest\032%.hb" +
+      "ase.pb.IsInMaintenanceModeResponse\022>\n\007Ba",
+      "lance\022\030.hbase.pb.BalanceRequest\032\031.hbase." +
+      "pb.BalanceResponse\022_\n\022SetBalancerRunning" +
+      "\022#.hbase.pb.SetBalancerRunningRequest\032$." +
+      "hbase.pb.SetBalancerRunningResponse\022\\\n\021I" +
+      "sBalancerEnabled\022\".hbase.pb.IsBalancerEn" +
+      "abledRequest\032#.hbase.pb.IsBalancerEnable" +
+      "dResponse\022k\n\026SetSplitOrMergeEnabled\022\'.hb" +
+      "ase.pb.SetSplitOrMergeEnabledRequest\032(.h" +
+      "base.pb.SetSplitOrMergeEnabledResponse\022h" +
+      "\n\025IsSplitOrMergeEnabled\022&.hbase.pb.IsSpl",
+      "itOrMergeEnabledRequest\032\'.hbase.pb.IsSpl" +
+      "itOrMergeEnabledResponse\022D\n\tNormalize\022\032." +
+      "hbase.pb.NormalizeRequest\032\033.hbase.pb.Nor" +
+      "malizeResponse\022e\n\024SetNormalizerRunning\022%" +
+      ".hbase.pb.SetNormalizerRunningRequest\032&." +
+      "hbase.pb.SetNormalizerRunningResponse\022b\n" +
+      "\023IsNormalizerEnabled\022$.hbase.pb.IsNormal" +
+      "izerEnabledRequest\032%.hbase.pb.IsNormaliz" +
+      "erEnabledResponse\022S\n\016RunCatalogScan\022\037.hb" +
+      "ase.pb.RunCatalogScanRequest\032 .hbase.pb.",
+      "RunCatalogScanResponse\022e\n\024EnableCatalogJ" +
+      "anitor\022%.hbase.pb.EnableCatalogJanitorRe" +
+      "quest\032&.hbase.pb.EnableCatalogJanitorRes" +
+      "ponse\022n\n\027IsCatalogJanitorEnabled\022(.hbase" +
+      ".pb.IsCatalogJanitorEnabledRequest\032).hba" +
+      "se.pb.IsCatalogJanitorEnabledResponse\022V\n" +
+      "\017RunCleanerChore\022 .hbase.pb.RunCleanerCh" +
+      "oreRequest\032!.hbase.pb.RunCleanerChoreRes" +
+      "ponse\022k\n\026SetCleanerChoreRunning\022\'.hbase." +
+      "pb.SetCleanerChoreRunningRequest\032(.hbase",
+      ".pb.SetCleanerChoreRunningResponse\022h\n\025Is" +
+      "CleanerChoreEnabled\022&.hbase.pb.IsCleaner" +
+      "ChoreEnabledRequest\032\'.hbase.pb.IsCleaner" +
+      "ChoreEnabledResponse\022^\n\021ExecMasterServic" +
+      "e\022#.hbase.pb.CoprocessorServiceRequest\032$" +
+      ".hbase.pb.CoprocessorServiceResponse\022A\n\010" +
+      "Snapshot\022\031.hbase.pb.SnapshotRequest\032\032.hb" +
+      "ase.pb.SnapshotResponse\022h\n\025GetCompletedS" +
+      "napshots\022&.hbase.pb.GetCompletedSnapshot" +
+      "sRequest\032\'.hbase.pb.GetCompletedSnapshot",
+      "sResponse\022S\n\016DeleteSnapshot\022\037.hbase.pb.D" +
+      "eleteSnapshotRequest\032 .hbase.pb.DeleteSn" +
+      "apshotResponse\022S\n\016IsSnapshotDone\022\037.hbase" +
+      ".pb.IsSnapshotDoneRequest\032 .hbase.pb.IsS" +
+      "napshotDoneResponse\022V\n\017RestoreSnapshot\022 " +
+      ".hbase.pb.RestoreSnapshotRequest\032!.hbase" +
+      ".pb.RestoreSnapshotResponse\022P\n\rExecProce" +
+      "dure\022\036.hbase.pb.ExecProcedureRequest\032\037.h" +
+      "base.pb.ExecProcedureResponse\022W\n\024ExecPro" +
+      "cedureWithRet\022\036.hbase.pb.ExecProcedureRe",
+      "quest\032\037.hbase.pb.ExecProcedureResponse\022V" +
+      "\n\017IsProcedureDone\022 .hbase.pb.IsProcedure" +
+      "DoneRequest\032!.hbase.pb.IsProcedureDoneRe" +
+      "sponse\022V\n\017ModifyNamespace\022 .hbase.pb.Mod" +
+      "ifyNamespaceRequest\032!.hbase.pb.ModifyNam" +
+      "espaceResponse\022V\n\017CreateNamespace\022 .hbas" +
+      "e.pb.CreateNamespaceRequest\032!.hbase.pb.C" +
+      "reateNamespaceResponse\022V\n\017DeleteNamespac" +
+      "e\022 .hbase.pb.DeleteNamespaceRequest\032!.hb" +
+      "ase.pb.DeleteNamespaceResponse\022k\n\026GetNam",
+      "espaceDescriptor\022\'.hbase.pb.GetNamespace" +
+      "DescriptorRequest\032(.hbase.pb.GetNamespac" +
+      "eDescriptorResponse\022q\n\030ListNamespaceDesc" +
+      "riptors\022).hbase.pb.ListNamespaceDescript" +
+      "orsRequest\032*.hbase.pb.ListNamespaceDescr" +
+      "iptorsResponse\022\206\001\n\037ListTableDescriptorsB" +
+      "yNamespace\0220.hbase.pb.ListTableDescripto" +
+      "rsByNamespaceRequest\0321.hbase.pb.ListTabl" +
+      "eDescriptorsByNamespaceResponse\022t\n\031ListT" +
+      "ableNamesByNamespace\022*.hbase.pb.ListTabl",
+      "eNamesByNamespaceRequest\032+.hbase.pb.List" +
+      "TableNamesByNamespaceResponse\022P\n\rGetTabl" +
+      "eState\022\036.hbase.pb.GetTableStateRequest\032\037" +
+      ".hbase.pb.GetTableStateResponse\022A\n\010SetQu" +
+      "ota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase.p" +
+      "b.SetQuotaResponse\022x\n\037getLastMajorCompac" +
+      "tionTimestamp\022).hbase.pb.MajorCompaction" +
+      "TimestampRequest\032*.hbase.pb.MajorCompact" +
+      "ionTimestampResponse\022\212\001\n(getLastMajorCom" +
+      "pactionTimestampForRegion\0222.hbase.pb.Maj",
+      "orCompactionTimestampForRegionRequest\032*." +
+      "hbase.pb.MajorCompactionTimestampRespons" +
+      "e\022_\n\022getProcedureResult\022#.hbase.pb.GetPr" +
+      "ocedureResultRequest\032$.hbase.pb.GetProce" +
+      "dureResultResponse\022h\n\027getSecurityCapabil" +
+      "ities\022%.hbase.pb.SecurityCapabilitiesReq" +
+      "uest\032&.hbase.pb.SecurityCapabilitiesResp" +
+      "onse\022S\n\016AbortProcedure\022\037.hbase.pb.AbortP" +
+      "rocedureRequest\032 .hbase.pb.AbortProcedur" +
+      "eResponse\022S\n\016ListProcedures\022\037.hbase.pb.L",
+      "istProceduresRequest\032 .hbase.pb.ListProc" +
+      "eduresResponse\022D\n\tListLocks\022\032.hbase.pb.L" +
+      "istLocksRequest\032\033.hbase.pb.ListLocksResp" +
+      "onse\022_\n\022AddReplicationPeer\022#.hbase.pb.Ad" +
+      "dReplicationPeerRequest\032$.hbase.pb.AddRe" +
+      "plicationPeerResponse\022h\n\025RemoveReplicati" +
+      "onPeer\022&.hbase.pb.RemoveReplicationPeerR" +
+      "equest\032\'.hbase.pb.RemoveReplicationPeerR" +
+      "esponse\022h\n\025EnableReplicationPeer\022&.hbase" +
+      ".pb.EnableReplicationPeerRequest\032\'.hbase",
+      ".pb.EnableReplicationPeerResponse\022k\n\026Dis" +
+      "ableReplicationPeer\022\'.hbase.pb.DisableRe" +
+      "plicationPeerRequest\032(.hbase.pb.DisableR" +
+      "eplicationPeerResponse\022q\n\030GetReplication" +
+      "PeerConfig\022).hbase.pb.GetReplicationPeer" +
+      "ConfigRequest\032*.hbase.pb.GetReplicationP" +
+      "eerConfigResponse\022z\n\033UpdateReplicationPe" +
+      "erConfig\022,.hbase.pb.UpdateReplicationPee" +
+      "rConfigRequest\032-.hbase.pb.UpdateReplicat" +
+      "ionPeerConfigResponse\022e\n\024ListReplication",
+      "Peers\022%.hbase.pb.ListReplicationPeersReq" +
+      "uest\032&.hbase.pb.ListReplicationPeersResp" +
+      "onse\022t\n\031listDrainingRegionServers\022*.hbas" +
+      "e.pb.ListDrainingRegionServersRequest\032+." +
+      "hbase.pb.ListDrainingRegionServersRespon" +
+      "se\022_\n\022drainRegionServers\022#.hbase.pb.Drai" +
+      "nRegionServersRequest\032$.hbase.pb.DrainRe" +
+      "gionServersResponse\022}\n\034removeDrainFromRe" +
+      "gionServers\022-.hbase.pb.RemoveDrainFromRe" +
+      "gionServersRequest\032..hbase.pb.RemoveDrai",
+      "nFromRegionServersResponseBI\n1org.apache" +
+      ".hadoop.hbase.shaded.protobuf.generatedB" +
+      "\014MasterProtosH\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -76853,6 +78065,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor(),
+          org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.getDescriptor(),
@@ -77517,80 +78730,92 @@ public final class MasterProtos {
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ListProceduresResponse_descriptor,
         new java.lang.String[] { "Procedure", });
-    internal_static_hbase_pb_SetQuotaRequest_descriptor =
+    internal_static_hbase_pb_ListLocksRequest_descriptor =
       getDescriptor().getMessageTypes().get(110);
+    internal_static_hbase_pb_ListLocksRequest_fieldAccessorTable = new
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+        internal_static_hbase_pb_ListLocksRequest_descriptor,
+        new java.lang.String[] { });
+    internal_static_hbase_pb_ListLocksResponse_descriptor =
+      getDescriptor().getMessageTypes().get(111);
+    internal_static_hbase_pb_ListLocksResponse_fieldAccessorTable = new
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+        internal_static_hbase_pb_ListLocksResponse_descriptor,
+        new java.lang.String[] { "Lock", });
+    internal_static_hbase_pb_SetQuotaRequest_descriptor =
+      getDescriptor().getMessageTypes().get(112);
     internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SetQuotaRequest_descriptor,
         new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
     internal_static_hbase_pb_SetQuotaResponse_descriptor =
-      getDescriptor().getMessageTypes().get(111);
+      getDescriptor().getMessageTypes().get(113);
     internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SetQuotaResponse_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor =
-      getDescriptor().getMessageTypes().get(112);
+      getDescriptor().getMessageTypes().get(114);
     internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor,
         new java.lang.String[] { "TableName", });
     internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor =
-      getDescriptor().getMessageTypes().get(113);
+      getDescriptor().getMessageTypes().get(115);
     internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor,
         new java.lang.String[] { "Region", });
     internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor =
-      getDescriptor().getMessageTypes().get(114);
+      getDescriptor().getMessageTypes().get(116);
     internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor,
         new java.lang.String[] { "CompactionTimestamp", });
     internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor =
-      getDescriptor().getMessageTypes().get(115);
+      getDescriptor().getMessageTypes().get(117);
     internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor =
-      getDescriptor().getMessageTypes().get(116);
+      getDescriptor().getMessageTypes().get(118);
     internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor,
         new java.lang.String[] { "Capabilities", });
     internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor =
-      getDescriptor().getMessageTypes().get(117);
+      getDescriptor().getMessageTypes().get(119);
     internal_static_hbase_pb_ListDrainingRegionServersRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_ListDrainingRegionServersResponse_descriptor =
-      getDescriptor().getMessageTypes().get(118);
+      getDescriptor().getMessageTypes().get(120);
     internal_static_hbase_pb_ListDrainingRegionServersResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_ListDrainingRegionServersResponse_descriptor,
         new java.lang.String[] { "ServerName", });
     internal_static_hbase_pb_DrainRegionServersRequest_descriptor =
-      getDescriptor().getMessageTypes().get(119);
+      getDescriptor().getMessageTypes().get(121);
     internal_static_hbase_pb_DrainRegionServersRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_DrainRegionServersRequest_descriptor,
         new java.lang.String[] { "ServerName", });
     internal_static_hbase_pb_DrainRegionServersResponse_descriptor =
-      getDescriptor().getMessageTypes().get(120);
+      getDescriptor().getMessageTypes().get(122);
     internal_static_hbase_pb_DrainRegionServersResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_DrainRegionServersResponse_descriptor,
         new java.lang.String[] { });
     internal_static_hbase_pb_RemoveDrainFromRegionServersRequest_descriptor =
-      getDescriptor().getMessageTypes().get(121);
+      getDescriptor().getMessageTypes().get(123);
     internal_static_hbase_pb_RemoveDrainFromRegionServersRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RemoveDrainFromRegionServersRequest_descriptor,
         new java.lang.String[] { "ServerName", });
     internal_static_hbase_pb_RemoveDrainFromRegionServersResponse_descriptor =
-      getDescriptor().getMessageTypes().get(122);
+      getDescriptor().getMessageTypes().get(124);
     internal_static_hbase_pb_RemoveDrainFromRegionServersResponse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RemoveDrainFromRegionServersResponse_descriptor,
@@ -77599,6 +78824,7 @@ public final class MasterProtos {
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor();
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.getDescriptor();
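
Regenerating MasterProtos.java shifts every message-type index after ListProceduresResponse up by two, because the new ListLocksRequest and ListLocksResponse messages occupy descriptor slots 110 and 111. The following is a minimal illustrative check of that ordering, not part of the patch; it assumes the regenerated MasterProtos class is on the classpath.

// Illustrative check of the regenerated descriptor ordering; not part of the patch.
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

public class DescriptorOrderCheck {
  public static void main(String[] args) {
    Descriptors.FileDescriptor fd = MasterProtos.getDescriptor();
    // The two new messages land at indices 110 and 111, pushing
    // SetQuotaRequest (and everything after it) up by two.
    System.out.println(fd.getMessageTypes().get(110).getName()); // ListLocksRequest
    System.out.println(fd.getMessageTypes().get(111).getName()); // ListLocksResponse
    System.out.println(fd.getMessageTypes().get(112).getName()); // SetQuotaRequest
  }
}
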
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index 0df7f2eb74d..1898e687940 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
 import "HBase.proto";
+import "Procedure.proto";
 
 enum LockType {
   EXCLUSIVE = 1;
@@ -70,6 +71,27 @@ message LockProcedureData {
   optional bool is_master_lock = 6 [default = false];
 }
 
+enum ResourceType {
+  RESOURCE_TYPE_SERVER = 1;
+  RESOURCE_TYPE_NAMESPACE = 2;
+  RESOURCE_TYPE_TABLE = 3;
+  RESOURCE_TYPE_REGION = 4;
+}
+
+message WaitingProcedure {
+  required LockType lock_type = 1;
+  required Procedure procedure = 2;
+}
+
+message LockInfo {
+  required ResourceType resource_type = 1;
+  optional string resource_name = 2;
+  required LockType lock_type = 3;
+  optional Procedure exclusive_lock_owner_procedure = 4;
+  optional int32 shared_lock_count = 5;
+  repeated WaitingProcedure waitingProcedures = 6;
+}
+
 service LockService {
   /** Acquire lock on namespace/table/region */
   rpc RequestLock(LockRequest) returns(LockResponse);
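
Once LockService.proto is regenerated, the new LockInfo message can be populated through the usual generated builders. A minimal sketch follows, assuming the regenerated LockServiceProtos class; field names map to accessors via standard protobuf codegen, so for example resource_type becomes setResourceType() and the camelCase field waitingProcedures would yield addWaitingProcedures().

// Sketch of building the new LockInfo message; illustrative only.
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;

public class LockInfoExample {
  public static void main(String[] args) {
    LockServiceProtos.LockInfo lock = LockServiceProtos.LockInfo.newBuilder()
        // resource_type and lock_type are required fields.
        .setResourceType(LockServiceProtos.ResourceType.RESOURCE_TYPE_TABLE)
        .setResourceName("ns:table")   // optional resource name
        .setLockType(LockServiceProtos.LockType.EXCLUSIVE)
        .setSharedLockCount(0)         // optional; meaningful for shared locks
        .build();
    System.out.println(lock);
  }
}
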
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index d7d51e27d63..0c3da025f27 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -30,6 +30,7 @@ import "HBase.proto";
 import "Client.proto";
 import "ClusterStatus.proto";
 import "ErrorHandling.proto";
+import "LockService.proto";
 import "Procedure.proto";
 import "Quota.proto";
 import "Replication.proto";
@@ -534,6 +535,13 @@ message ListProceduresResponse {
   repeated Procedure procedure = 1;
 }
 
+message ListLocksRequest {
+}
+
+message ListLocksResponse {
+  repeated LockInfo lock = 1;
+}
+
 message SetQuotaRequest {
   optional string user_name = 1;
   optional string user_group = 2;
@@ -888,6 +896,9 @@ service MasterService {
   rpc ListProcedures(ListProceduresRequest)
     returns(ListProceduresResponse);
 
+  rpc ListLocks(ListLocksRequest)
+    returns(ListLocksResponse);
+
   /** Add a replication peer */
   rpc AddReplicationPeer(AddReplicationPeerRequest)
     returns(AddReplicationPeerResponse);
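
With the ListLocks endpoint added to MasterService, the RPC is reachable through the listLocks() method this patch adds to the client Admin API. A minimal usage sketch, assuming a running cluster and default configuration; the accessors on the returned client-side LockInfo objects are not shown in this hunk, so only the iteration is illustrated.

// Sketch of listing locks through the new Admin API; illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.procedure2.LockInfo;

public class ListLocksExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Invokes MasterService.ListLocks and converts the protobuf
      // ListLocksResponse into client-side LockInfo objects.
      for (LockInfo lock : admin.listLocks()) {
        System.out.println(lock);
      }
    }
  }
}
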
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 36d5112d1b9..e1a47c54870 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -125,7 +125,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();