HBASE-15143 Procedure v2 - Web UI displaying queues

Signed-off-by: Michael Stack <stack@apache.org>

parent 1367519cd0
commit 2557506415
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -1249,6 +1250,14 @@ public interface Admin extends Abortable, Closeable {
   ProcedureInfo[] listProcedures()
       throws IOException;

+  /**
+   * List locks.
+   * @return lock list
+   * @throws IOException if a remote or network exception occurs
+   */
+  LockInfo[] listLocks()
+      throws IOException;
+
   /**
    * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
    *
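Not part of this commit, but for orientation: a minimal client-side sketch of the new Admin call (assumes a reachable cluster with default configuration; error handling elided):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.procedure2.LockInfo;

    public class ListLocksExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            Admin admin = conn.getAdmin()) {
          // Ask the master for the current lock queues (the API added above).
          for (LockInfo lock : admin.listLocks()) {
            System.out.println(lock.getResourceType() + " " + lock.getResourceName()
                + " " + lock.getLockType() + " sharedCount=" + lock.getSharedLockCount());
          }
        }
      }
    }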
@@ -25,8 +25,6 @@ import static org.apache.hadoop.hbase.client.MetricsConnection.CLIENT_SIDE_METRI
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsentEx;

-import com.google.common.annotations.VisibleForTesting;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -120,6 +118,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.zookeeper.KeeperException;

+import com.google.common.annotations.VisibleForTesting;
+
 import edu.umd.cs.findbugs.annotations.Nullable;

 /**
@@ -1282,6 +1282,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
         return stub.listProcedures(controller, request);
       }

+      @Override
+      public MasterProtos.ListLocksResponse listLocks(
+          RpcController controller,
+          MasterProtos.ListLocksRequest request) throws ServiceException {
+        return stub.listLocks(controller, request);
+      }
+
       @Override
       public MasterProtos.AddColumnResponse addColumn(
           RpcController controller,
@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -80,6 +79,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -110,6 +110,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringP
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
@@ -151,6 +152,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedur
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
@@ -191,7 +194,6 @@ import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -201,7 +203,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException;

 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
@@ -2096,26 +2097,33 @@ public class HBaseAdmin implements Admin {
             getRpcController(), ListProceduresRequest.newBuilder().build()).getProcedureList();
         ProcedureInfo[] procInfoList = new ProcedureInfo[procList.size()];
         for (int i = 0; i < procList.size(); i++) {
-          procInfoList[i] = convert(procList.get(i));
+          procInfoList[i] = ProtobufUtil.toProcedureInfo(procList.get(i));
         }
         return procInfoList;
       }
     });
   }

-  private static ProcedureInfo convert(final ProcedureProtos.Procedure procProto) {
-    NonceKey nonceKey = null;
-    if (procProto.getNonce() != HConstants.NO_NONCE) {
-      nonceKey = new NonceKey(procProto.getNonceGroup(), procProto.getNonce());
-    }
-    org.apache.hadoop.hbase.ProcedureState procedureState =
-        org.apache.hadoop.hbase.ProcedureState.valueOf(procProto.getState().name());
-    return new ProcedureInfo(procProto.getProcId(), procProto.getClassName(), procProto.getOwner(),
-        procedureState, procProto.hasParentId() ? procProto.getParentId() : -1, nonceKey,
-        procProto.hasException()?
-            ForeignExceptionUtil.toIOException(procProto.getException()): null,
-        procProto.getLastUpdate(), procProto.getSubmittedTime(),
-        procProto.hasResult()? procProto.getResult().toByteArray() : null);
+  @Override
+  public LockInfo[] listLocks() throws IOException {
+    return executeCallable(new MasterCallable<LockInfo[]>(getConnection(),
+        getRpcControllerFactory()) {
+      @Override
+      protected LockInfo[] rpcCall() throws Exception {
+        ListLocksRequest request = ListLocksRequest.newBuilder().build();
+        ListLocksResponse response = master.listLocks(getRpcController(), request);
+        List<LockServiceProtos.LockInfo> locksProto = response.getLockList();
+
+        LockInfo[] locks = new LockInfo[locksProto.size()];
+
+        for (int i = 0; i < locks.length; i++) {
+          LockServiceProtos.LockInfo lockProto = locksProto.get(i);
+          locks[i] = ProtobufUtil.toLockInfo(lockProto);
+        }
+
+        return locks;
+      }
+    });
   }

   @Override
@@ -190,6 +190,12 @@ public class ShortCircuitMasterConnection implements MasterKeepAliveConnection {
     return stub.listProcedures(controller, request);
   }

+  @Override
+  public ListLocksResponse listLocks(RpcController controller,
+      ListLocksRequest request) throws ServiceException {
+    return stub.listLocks(controller, request);
+  }
+
   @Override
   public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller,
       ListNamespaceDescriptorsRequest request) throws ServiceException {
@@ -53,6 +53,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ProcedureState;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -82,6 +84,7 @@ import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.LimitInputStream;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
 import org.apache.hadoop.hbase.quotas.QuotaType;
@@ -145,11 +148,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
@@ -166,7 +172,9 @@ import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.DynamicClassLoader;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.Methods;
+import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.ipc.RemoteException;

@@ -3262,4 +3270,177 @@ public final class ProtobufUtil {
     int port = Addressing.parsePort(str);
     return ServerName.valueOf(hostname, port, -1L);
   }
-}
+
+  /**
+   * @return Convert the current {@link ProcedureInfo} into a Protocol Buffers Procedure
+   * instance.
+   */
+  public static ProcedureProtos.Procedure toProtoProcedure(ProcedureInfo procedure) {
+    ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder();
+
+    builder.setClassName(procedure.getProcName());
+    builder.setProcId(procedure.getProcId());
+    builder.setSubmittedTime(procedure.getSubmittedTime());
+    builder.setState(ProcedureProtos.ProcedureState.valueOf(procedure.getProcState().name()));
+    builder.setLastUpdate(procedure.getLastUpdate());
+
+    if (procedure.hasParentId()) {
+      builder.setParentId(procedure.getParentId());
+    }
+
+    if (procedure.hasOwner()) {
+      builder.setOwner(procedure.getProcOwner());
+    }
+
+    if (procedure.isFailed()) {
+      builder.setException(ForeignExceptionUtil.toProtoForeignException(procedure.getException()));
+    }
+
+    if (procedure.hasResultData()) {
+      builder.setResult(UnsafeByteOperations.unsafeWrap(procedure.getResult()));
+    }
+
+    return builder.build();
+  }
+
+  /**
+   * Helper to convert the protobuf object.
+   * @return Convert the current Protocol Buffers Procedure to {@link ProcedureInfo}
+   * instance.
+   */
+  public static ProcedureInfo toProcedureInfo(ProcedureProtos.Procedure procedureProto) {
+    NonceKey nonceKey = null;
+
+    if (procedureProto.getNonce() != HConstants.NO_NONCE) {
+      nonceKey = new NonceKey(procedureProto.getNonceGroup(), procedureProto.getNonce());
+    }
+
+    return new ProcedureInfo(procedureProto.getProcId(), procedureProto.getClassName(),
+        procedureProto.hasOwner() ? procedureProto.getOwner() : null,
+        ProcedureState.valueOf(procedureProto.getState().name()),
+        procedureProto.hasParentId() ? procedureProto.getParentId() : -1, nonceKey,
+        procedureProto.hasException() ?
+            ForeignExceptionUtil.toIOException(procedureProto.getException()) : null,
+        procedureProto.getLastUpdate(), procedureProto.getSubmittedTime(),
+        procedureProto.hasResult() ? procedureProto.getResult().toByteArray() : null);
+  }
+
+  public static LockServiceProtos.ResourceType toProtoResourceType(
+      LockInfo.ResourceType resourceType) {
+    switch (resourceType) {
+    case SERVER:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_SERVER;
+    case NAMESPACE:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_NAMESPACE;
+    case TABLE:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_TABLE;
+    case REGION:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_REGION;
+    default:
+      throw new IllegalArgumentException("Unknown resource type: " + resourceType);
+    }
+  }
+
+  public static LockInfo.ResourceType toResourceType(
+      LockServiceProtos.ResourceType resourceTypeProto) {
+    switch (resourceTypeProto) {
+    case RESOURCE_TYPE_SERVER:
+      return LockInfo.ResourceType.SERVER;
+    case RESOURCE_TYPE_NAMESPACE:
+      return LockInfo.ResourceType.NAMESPACE;
+    case RESOURCE_TYPE_TABLE:
+      return LockInfo.ResourceType.TABLE;
+    case RESOURCE_TYPE_REGION:
+      return LockInfo.ResourceType.REGION;
+    default:
+      throw new IllegalArgumentException("Unknown resource type: " + resourceTypeProto);
+    }
+  }
+
+  public static LockServiceProtos.LockType toProtoLockType(
+      LockInfo.LockType lockType) {
+    return LockServiceProtos.LockType.valueOf(lockType.name());
+  }
+
+  public static LockInfo.LockType toLockType(
+      LockServiceProtos.LockType lockTypeProto) {
+    return LockInfo.LockType.valueOf(lockTypeProto.name());
+  }
+
+  public static LockServiceProtos.WaitingProcedure toProtoWaitingProcedure(
+      LockInfo.WaitingProcedure waitingProcedure) {
+    LockServiceProtos.WaitingProcedure.Builder builder = LockServiceProtos.WaitingProcedure.newBuilder();
+
+    ProcedureProtos.Procedure procedureProto =
+        toProtoProcedure(waitingProcedure.getProcedure());
+
+    builder
+        .setLockType(toProtoLockType(waitingProcedure.getLockType()))
+        .setProcedure(procedureProto);
+
+    return builder.build();
+  }
+
+  public static LockInfo.WaitingProcedure toWaitingProcedure(
+      LockServiceProtos.WaitingProcedure waitingProcedureProto) {
+    LockInfo.WaitingProcedure waiting = new LockInfo.WaitingProcedure();
+
+    waiting.setLockType(toLockType(waitingProcedureProto.getLockType()));
+
+    ProcedureInfo procedure =
+        toProcedureInfo(waitingProcedureProto.getProcedure());
+    waiting.setProcedure(procedure);
+
+    return waiting;
+  }
+
+  public static LockServiceProtos.LockInfo toProtoLockInfo(LockInfo lock)
+  {
+    LockServiceProtos.LockInfo.Builder builder = LockServiceProtos.LockInfo.newBuilder();
+
+    builder
+        .setResourceType(toProtoResourceType(lock.getResourceType()))
+        .setResourceName(lock.getResourceName())
+        .setLockType(toProtoLockType(lock.getLockType()));
+
+    ProcedureInfo exclusiveLockOwnerProcedure = lock.getExclusiveLockOwnerProcedure();
+
+    if (exclusiveLockOwnerProcedure != null) {
+      Procedure exclusiveLockOwnerProcedureProto =
+          toProtoProcedure(lock.getExclusiveLockOwnerProcedure());
+      builder.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureProto);
+    }
+
+    builder.setSharedLockCount(lock.getSharedLockCount());
+
+    for (LockInfo.WaitingProcedure waitingProcedure : lock.getWaitingProcedures()) {
+      builder.addWaitingProcedures(toProtoWaitingProcedure(waitingProcedure));
+    }
+
+    return builder.build();
+  }
+
+  public static LockInfo toLockInfo(LockServiceProtos.LockInfo lockProto)
+  {
+    LockInfo lock = new LockInfo();
+
+    lock.setResourceType(toResourceType(lockProto.getResourceType()));
+    lock.setResourceName(lockProto.getResourceName());
+    lock.setLockType(toLockType(lockProto.getLockType()));
+
+    if (lockProto.hasExclusiveLockOwnerProcedure()) {
+      ProcedureInfo exclusiveLockOwnerProcedureProto =
+          toProcedureInfo(lockProto.getExclusiveLockOwnerProcedure());
+
+      lock.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureProto);
+    }
+
+    lock.setSharedLockCount(lockProto.getSharedLockCount());
+
+    for (LockServiceProtos.WaitingProcedure waitingProcedureProto : lockProto.getWaitingProceduresList()) {
+      lock.addWaitingProcedure(toWaitingProcedure(waitingProcedureProto));
+    }
+
+    return lock;
+  }
+}
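For illustration only (not in the commit): the new converters compose into a round trip, e.g. in a test helper. The ProtobufUtil package is assumed to be the shaded one used by the imports above, and the locks are assumed fully populated (in particular a non-null resource name, which the proto setter requires):

    import java.util.List;

    import org.apache.hadoop.hbase.procedure2.LockInfo;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;

    public class LockInfoRoundTrip {
      /** Serialize each lock to its wire form and back, as the RPC path does. */
      static void assertRoundTrip(List<LockInfo> locks) {
        for (LockInfo lock : locks) {
          LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
          LockInfo back = ProtobufUtil.toLockInfo(proto);
          assert back.getResourceType() == lock.getResourceType();
          assert back.getLockType() == lock.getLockType();
          assert back.getSharedLockCount() == lock.getSharedLockCount();
        }
      }
    }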
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Public
+public class LockInfo {
+  @InterfaceAudience.Public
+  public enum ResourceType {
+    SERVER, NAMESPACE, TABLE, REGION
+  }
+
+  @InterfaceAudience.Public
+  public enum LockType {
+    EXCLUSIVE, SHARED
+  }
+
+  @InterfaceAudience.Public
+  public static class WaitingProcedure {
+    private LockType lockType;
+    private ProcedureInfo procedure;
+
+    public WaitingProcedure() {
+    }
+
+    public LockType getLockType() {
+      return lockType;
+    }
+
+    public void setLockType(LockType lockType) {
+      this.lockType = lockType;
+    }
+
+    public ProcedureInfo getProcedure() {
+      return procedure;
+    }
+
+    public void setProcedure(ProcedureInfo procedure) {
+      this.procedure = procedure;
+    }
+  }
+
+  private ResourceType resourceType;
+  private String resourceName;
+  private LockType lockType;
+  private ProcedureInfo exclusiveLockOwnerProcedure;
+  private int sharedLockCount;
+  private final List<WaitingProcedure> waitingProcedures;
+
+  public LockInfo() {
+    waitingProcedures = new ArrayList<>();
+  }
+
+  public ResourceType getResourceType() {
+    return resourceType;
+  }
+
+  public void setResourceType(ResourceType resourceType) {
+    this.resourceType = resourceType;
+  }
+
+  public String getResourceName() {
+    return resourceName;
+  }
+
+  public void setResourceName(String resourceName) {
+    this.resourceName = resourceName;
+  }
+
+  public LockType getLockType() {
+    return lockType;
+  }
+
+  public void setLockType(LockType lockType) {
+    this.lockType = lockType;
+  }
+
+  public ProcedureInfo getExclusiveLockOwnerProcedure() {
+    return exclusiveLockOwnerProcedure;
+  }
+
+  public void setExclusiveLockOwnerProcedure(
+      ProcedureInfo exclusiveLockOwnerProcedure) {
+    this.exclusiveLockOwnerProcedure = exclusiveLockOwnerProcedure;
+  }
+
+  public int getSharedLockCount() {
+    return sharedLockCount;
+  }
+
+  public void setSharedLockCount(int sharedLockCount) {
+    this.sharedLockCount = sharedLockCount;
+  }
+
+  public List<WaitingProcedure> getWaitingProcedures() {
+    return waitingProcedures;
+  }
+
+  public void setWaitingProcedures(List<WaitingProcedure> waitingProcedures) {
+    this.waitingProcedures.clear();
+    this.waitingProcedures.addAll(waitingProcedures);
+  }
+
+  public void addWaitingProcedure(WaitingProcedure waitingProcedure) {
+    waitingProcedures.add(waitingProcedure);
+  }
+}
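A small usage sketch (illustrative values, not commit code) showing how a caller might populate the new bean:

    import org.apache.hadoop.hbase.procedure2.LockInfo;

    public class LockInfoExample {
      static LockInfo exclusiveTableLock() {
        // Describe an exclusive lock on a (made-up) table with one queued waiter.
        LockInfo lock = new LockInfo();
        lock.setResourceType(LockInfo.ResourceType.TABLE);
        lock.setResourceName("ns:exampleTable");
        lock.setLockType(LockInfo.LockType.EXCLUSIVE);
        lock.setSharedLockCount(0);

        LockInfo.WaitingProcedure waiting = new LockInfo.WaitingProcedure();
        waiting.setLockType(LockInfo.LockType.SHARED);
        // waiting.setProcedure(...) would carry the queued procedure's ProcedureInfo.
        lock.addWaitingProcedure(waiting);
        return lock;
      }
    }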
@@ -43,7 +43,7 @@ package org.apache.hadoop.hbase.procedure2;
  * We do not use ReentrantReadWriteLock directly because of its high memory overhead.
  */
 public class LockAndQueue extends ProcedureDeque implements LockStatus {
-  private long exclusiveLockProcIdOwner = Long.MIN_VALUE;
+  private Procedure<?> exclusiveLockOwnerProcedure = null;
   private int sharedLock = 0;

   // ======================================================================
@@ -57,12 +57,12 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {

   @Override
   public boolean hasExclusiveLock() {
-    return this.exclusiveLockProcIdOwner != Long.MIN_VALUE;
+    return this.exclusiveLockOwnerProcedure != null;
   }

   @Override
   public boolean isLockOwner(long procId) {
-    return exclusiveLockProcIdOwner == procId;
+    return getExclusiveLockProcIdOwner() == procId;
   }

   @Override
@@ -75,9 +75,18 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
     return isLockOwner(proc.getProcId()) || hasParentLock(proc);
   }

+  @Override
+  public Procedure<?> getExclusiveLockOwnerProcedure() {
+    return exclusiveLockOwnerProcedure;
+  }
+
   @Override
   public long getExclusiveLockProcIdOwner() {
-    return exclusiveLockProcIdOwner;
+    if (exclusiveLockOwnerProcedure == null) {
+      return Long.MIN_VALUE;
+    } else {
+      return exclusiveLockOwnerProcedure.getProcId();
+    }
   }

   @Override
@@ -101,7 +110,7 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {

   public boolean tryExclusiveLock(final Procedure proc) {
     if (isLocked()) return hasLockAccess(proc);
-    exclusiveLockProcIdOwner = proc.getProcId();
+    exclusiveLockOwnerProcedure = proc;
     return true;
   }

@@ -110,7 +119,7 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
    */
   public boolean releaseExclusiveLock(final Procedure proc) {
     if (isLockOwner(proc.getProcId())) {
-      exclusiveLockProcIdOwner = Long.MIN_VALUE;
+      exclusiveLockOwnerProcedure = null;
       return true;
     }
     return false;
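The pattern behind this LockAndQueue change, reduced to a standalone sketch (not HBase code): store the owning object rather than just its id, and derive the id on demand, so callers can reach the full owner when listing locks.

    /** Minimal stand-in for anything with a numeric id, e.g. a procedure. */
    interface HasId {
      long getId();
    }

    class OwnerTrackingLock {
      private HasId owner = null; // was: long ownerId = Long.MIN_VALUE;

      boolean hasExclusiveLock() {
        return owner != null;
      }

      long getOwnerId() {
        // Derive the id from the owner instead of tracking it separately.
        return owner == null ? Long.MIN_VALUE : owner.getId();
      }

      boolean tryExclusiveLock(HasId proc) {
        if (hasExclusiveLock()) {
          return owner.getId() == proc.getId();
        }
        owner = proc;
        return true;
      }

      boolean releaseExclusiveLock(HasId proc) {
        if (owner != null && owner.getId() == proc.getId()) {
          owner = null;
          return true;
        }
        return false;
      }
    }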
@@ -29,6 +29,7 @@ public interface LockStatus {
   boolean isLockOwner(long procId);
   boolean hasParentLock(final Procedure proc);
   boolean hasLockAccess(final Procedure proc);
+  Procedure<?> getExclusiveLockOwnerProcedure();
   long getExclusiveLockProcIdOwner();
   int getSharedLockCount();
 }
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.procedure2;

 import com.google.common.annotations.VisibleForTesting;

+import java.util.List;
 import java.util.concurrent.TimeUnit;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;

@@ -120,6 +121,12 @@ public interface ProcedureScheduler {
    */
   boolean waitEvent(ProcedureEvent event, Procedure procedure);

+  /**
+   * List lock queues.
+   * @return the locks
+   */
+  List<LockInfo> listLocks();
+
   /**
    * Returns the number of elements in this queue.
    * @return the number of elements in this queue.
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.procedure2;

-import com.google.common.base.Preconditions;
-
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Modifier;

@@ -33,6 +31,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.NonceKey;

+import com.google.common.base.Preconditions;
+
 /**
  * Helper to convert to/from ProcedureProtos
  */
@@ -18,10 +18,13 @@

 package org.apache.hadoop.hbase.procedure2;

-import com.google.common.annotations.VisibleForTesting;
+import java.util.Collections;
+import java.util.List;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;

+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Simple scheduler for procedures
  */

@@ -73,4 +76,9 @@ public class SimpleProcedureScheduler extends AbstractProcedureScheduler {
   @Override
   public void completionCleanup(Procedure proc) {
   }
+
+  @Override
+  public List<LockInfo> listLocks() {
+    return Collections.emptyList();
+  }
 }
(Two file diffs suppressed because they are too large.)
@@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;

 import "HBase.proto";
+import "Procedure.proto";

 enum LockType {
   EXCLUSIVE = 1;
@@ -70,6 +71,27 @@ message LockProcedureData {
   optional bool is_master_lock = 6 [default = false];
 }

+enum ResourceType {
+  RESOURCE_TYPE_SERVER = 1;
+  RESOURCE_TYPE_NAMESPACE = 2;
+  RESOURCE_TYPE_TABLE = 3;
+  RESOURCE_TYPE_REGION = 4;
+}
+
+message WaitingProcedure {
+  required LockType lock_type = 1;
+  required Procedure procedure = 2;
+}
+
+message LockInfo {
+  required ResourceType resource_type = 1;
+  optional string resource_name = 2;
+  required LockType lock_type = 3;
+  optional Procedure exclusive_lock_owner_procedure = 4;
+  optional int32 shared_lock_count = 5;
+  repeated WaitingProcedure waitingProcedures = 6;
+}
+
 service LockService {
   /** Acquire lock on namespace/table/region */
   rpc RequestLock(LockRequest) returns(LockResponse);
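For reference (not part of the diff), the generated Java builder for the new message is used roughly like this; the setter names follow the standard protobuf field-name mapping and match the ProtobufUtil calls earlier in this commit:

    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;

    public class LockInfoProtoExample {
      static LockServiceProtos.LockInfo exampleProto() {
        // Field names from the proto above map to camelCase builder setters.
        return LockServiceProtos.LockInfo.newBuilder()
            .setResourceType(LockServiceProtos.ResourceType.RESOURCE_TYPE_TABLE)
            .setResourceName("ns:exampleTable")
            .setLockType(LockServiceProtos.LockType.EXCLUSIVE)
            .setSharedLockCount(0)
            .build();
      }
    }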
@@ -30,6 +30,7 @@ import "HBase.proto";
 import "Client.proto";
 import "ClusterStatus.proto";
 import "ErrorHandling.proto";
+import "LockService.proto";
 import "Procedure.proto";
 import "Quota.proto";
 import "Replication.proto";
@@ -534,6 +535,13 @@ message ListProceduresResponse {
   repeated Procedure procedure = 1;
 }

+message ListLocksRequest {
+}
+
+message ListLocksResponse {
+  repeated LockInfo lock = 1;
+}
+
 message SetQuotaRequest {
   optional string user_name = 1;
   optional string user_group = 2;
@@ -888,6 +896,9 @@ service MasterService {
   rpc ListProcedures(ListProceduresRequest)
     returns(ListProceduresResponse);

+  rpc ListLocks(ListLocksRequest)
+    returns(ListLocksResponse);
+
   /** Add a replication peer */
   rpc AddReplicationPeer(AddReplicationPeerRequest)
     returns(AddReplicationPeerResponse);
@@ -125,7 +125,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
         <ul class="nav navbar-nav">
         <li class="active"><a href="/">Home</a></li>
         <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-        <li><a href="/procedures.jsp">Procedures</a></li>
+        <li><a href="/procedures.jsp">Procedures & Locks</a></li>
         <li><a href="/logs/">Local Logs</a></li>
         <li><a href="/logLevel">Log Level</a></li>
         <li><a href="/dump">Debug Dump</a></li>
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;

@@ -982,6 +983,24 @@ public interface MasterObserver extends Coprocessor {
       ObserverContext<MasterCoprocessorEnvironment> ctx,
       List<ProcedureInfo> procInfoList) throws IOException {}

+  /**
+   * Called before a listLocks request has been processed.
+   * @param ctx the environment to interact with the framework and master
+   * @throws IOException if something went wrong
+   */
+  default void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
+      throws IOException {}
+
+  /**
+   * Called after a listLocks request has been processed.
+   * @param ctx the environment to interact with the framework and master
+   * @param lockInfoList the list of locks about to be returned
+   * @throws IOException if something went wrong
+   */
+  default void postListLocks(
+      ObserverContext<MasterCoprocessorEnvironment> ctx,
+      List<LockInfo> lockInfoList) throws IOException {}
+
   /**
    * Called prior to moving a given region from one region server to another.
    * @param ctx the environment to interact with the framework and master
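A hypothetical coprocessor exercising the new hook (illustrative only; the Coprocessor lifecycle methods are stubbed out, and the class name is made up):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.CoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.procedure2.LockInfo;

    public class LockAuditObserver implements MasterObserver {
      @Override
      public void start(CoprocessorEnvironment env) throws IOException {}

      @Override
      public void stop(CoprocessorEnvironment env) throws IOException {}

      @Override
      public void postListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx,
          List<LockInfo> lockInfoList) throws IOException {
        // Log every lock the master is about to return to the client.
        for (LockInfo lock : lockInfoList) {
          System.out.println("lock: " + lock.getResourceType() + " "
              + lock.getResourceName() + " " + lock.getLockType());
        }
      }
    }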
@@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ProcedureInfo;
-import org.apache.hadoop.hbase.RegionStateListener;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
@@ -113,6 +112,7 @@ import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.MergeTableRegionsProcedure;
 import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
@@ -128,6 +128,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
@@ -979,7 +980,7 @@ public class HMaster extends HRegionServer implements MasterServices {

   void initQuotaManager() throws IOException {
     MasterQuotaManager quotaManager = new MasterQuotaManager(this);
-    this.assignmentManager.setRegionStateListener((RegionStateListener)quotaManager);
+    this.assignmentManager.setRegionStateListener(quotaManager);
     quotaManager.start();
     this.quotaManager = quotaManager;
   }
@@ -1141,8 +1142,8 @@ public class HMaster extends HRegionServer implements MasterServices {
     procedureStore = new WALProcedureStore(conf, walDir.getFileSystem(conf), walDir,
         new MasterProcedureEnv.WALStoreLeaseRecovery(this));
     procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
-    procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
-        procEnv.getProcedureScheduler());
+    MasterProcedureScheduler procedureScheduler = procEnv.getProcedureScheduler();
+    procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
     configurationManager.registerObserver(procEnv);

     final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
@@ -2909,6 +2910,34 @@ public class HMaster extends HRegionServer implements MasterServices {
     return procInfoList;
   }

+  private Map<Long, ProcedureInfo> getProcedureInfos() {
+    final List<ProcedureInfo> list = procedureExecutor.listProcedures();
+    final Map<Long, ProcedureInfo> map = new HashMap<>();
+
+    for (ProcedureInfo procedureInfo : list) {
+      map.put(procedureInfo.getProcId(), procedureInfo);
+    }
+
+    return map;
+  }
+
+  @Override
+  public List<LockInfo> listLocks() throws IOException {
+    if (cpHost != null) {
+      cpHost.preListLocks();
+    }
+
+    MasterProcedureScheduler procedureScheduler = procedureExecutor.getEnvironment().getProcedureScheduler();
+
+    final List<LockInfo> lockInfoList = procedureScheduler.listLocks();
+
+    if (cpHost != null) {
+      cpHost.postListLocks(lockInfoList);
+    }
+
+    return lockInfoList;
+  }
+
   /**
    * Returns the list of table descriptors that match the specified request
    * @param namespace the namespace to query, or null if querying for all
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.User;

@@ -706,6 +707,26 @@ public class MasterCoprocessorHost
     });
   }

+  public boolean preListLocks() throws IOException {
+    return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preListLocks(ctx);
+      }
+    });
+  }
+
+  public void postListLocks(final List<LockInfo> lockInfoList) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postListLocks(ctx, lockInfoList);
+      }
+    });
+  }
+
   public boolean preMove(final HRegionInfo region, final ServerName srcServer,
       final ServerName destServer) throws IOException {
     return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@@ -56,8 +56,8 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -86,129 +86,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockH
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
|
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
|
||||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
|
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
|
||||||
|
@@ -1108,7 +987,7 @@ public class MasterRpcServices extends RSRpcServices
       }
       master.getMasterProcedureExecutor().removeResult(request.getProcId());
     } else {
-      Procedure proc = v.getSecond();
+      Procedure<?> proc = v.getSecond();
       if (proc == null) {
         builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
       } else {

@@ -1160,7 +1039,7 @@ public class MasterRpcServices extends RSRpcServices
     try {
       final ListProceduresResponse.Builder response = ListProceduresResponse.newBuilder();
       for (ProcedureInfo p: master.listProcedures()) {
-        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
+        response.addProcedure(ProtobufUtil.toProtoProcedure(p));
       }
       return response.build();
     } catch (IOException e) {

@@ -1168,6 +1047,23 @@ public class MasterRpcServices extends RSRpcServices
     }
   }

+  @Override
+  public ListLocksResponse listLocks(
+      RpcController controller,
+      ListLocksRequest request) throws ServiceException {
+    try {
+      final ListLocksResponse.Builder builder = ListLocksResponse.newBuilder();
+
+      for (LockInfo lockInfo: master.listLocks()) {
+        builder.addLock(ProtobufUtil.toProtoLockInfo(lockInfo));
+      }
+
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
       ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
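With the RPC endpoint above in place, the lock dump becomes reachable from an ordinary client through Admin#listLocks(). A minimal client-side sketch, assuming a reachable cluster and default configuration; the class name and output format here are illustrative, not part of this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.procedure2.LockInfo;

public class ListLocksExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // One LockInfo per held lock, each carrying the queue of waiters behind it.
      for (LockInfo lock : admin.listLocks()) {
        System.out.println(lock.getResourceType() + ":" + lock.getResourceName()
            + " held as " + lock.getLockType()
            + ", " + lock.getWaitingProcedures().size() + " waiting");
      }
    }
  }
}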
@@ -1459,7 +1355,6 @@ public class MasterRpcServices extends RSRpcServices
       throw new UnknownRegionException(Bytes.toString(regionName));
     }

-    if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
     HRegionInfo hri = pair.getFirst();
     if (master.cpHost != null) {
       if (master.cpHost.preUnassign(hri, force)) {
@@ -39,12 +39,12 @@ import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

 import com.google.protobuf.Service;

 /**

@@ -352,6 +352,13 @@ public interface MasterServices extends Server {
    */
  public List<ProcedureInfo> listProcedures() throws IOException;

+  /**
+   * List locks.
+   * @return lock list
+   * @throws IOException if the lock list cannot be obtained
+   */
+  public List<LockInfo> listLocks() throws IOException;
+
  /**
   * Get list of table descriptors by namespace
   * @param name namespace name
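MasterServices only fixes the contract; the HMaster side of the wiring is not shown in this excerpt. A plausible implementation sketch, assuming the master's ProcedureExecutor environment exposes the MasterProcedureScheduler that appears later in this patch and that the coprocessor host gained matching pre/post hooks — the field and accessor names below are assumptions, not taken verbatim from the commit:

  // Inside the MasterServices implementor (e.g. HMaster); names are assumed.
  @Override
  public List<LockInfo> listLocks() throws IOException {
    if (cpHost != null) {
      cpHost.preListLocks();                // assumed coprocessor-host hook
    }
    MasterProcedureScheduler scheduler =
        procedureExecutor.getEnvironment().getProcedureScheduler();
    List<LockInfo> lockInfoList = scheduler.listLocks();
    if (cpHost != null) {
      cpHost.postListLocks(lockInfoList);   // assumed coprocessor-host hook
    }
    return lockInfoList;
  }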
@@ -204,6 +204,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
    * WALs.
    * @return false, so procedure framework doesn't mark this procedure as failure.
    */
+  @Override
   protected boolean setTimeoutFailure(final MasterProcedureEnv env) {
     synchronized (event) {
       if (LOG.isDebugEnabled()) LOG.debug("Timeout failure " + this.event);

@@ -231,7 +232,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   }

   @Override
-  protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
+  protected Procedure<?>[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
     // Local master locks don't store any state, so on recovery, simply finish this procedure
     // immediately.
     if (recoveredMasterLock) return null;

@@ -334,6 +335,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
     setState(ProcedureProtos.ProcedureState.RUNNABLE);
   }

+  @Override
   protected void toStringClassDetails(final StringBuilder builder) {
     super.toStringClassDetails(builder);
     if (regionInfos != null) {

@@ -350,6 +352,10 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
     builder.append(", type=").append(type);
   }

+  public LockType getType() {
+    return type;
+  }
+
   private LockInterface setupLock() throws IllegalArgumentException {
     if (regionInfos != null) {
       return setupRegionLock();
@@ -21,27 +21,34 @@ package org.apache.hadoop.hbase.master.procedure;
 import com.google.common.annotations.VisibleForTesting;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
 import org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler;
+import org.apache.hadoop.hbase.procedure2.LockAndQueue;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.LockStatus;
 import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.LockAndQueue;
 import org.apache.hadoop.hbase.procedure2.ProcedureDeque;
-import org.apache.hadoop.hbase.util.AvlUtil.AvlKeyComparator;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.util.AvlUtil.AvlIterableList;
+import org.apache.hadoop.hbase.util.AvlUtil.AvlKeyComparator;
 import org.apache.hadoop.hbase.util.AvlUtil.AvlLinkedNode;
 import org.apache.hadoop.hbase.util.AvlUtil.AvlTree;
 import org.apache.hadoop.hbase.util.AvlUtil.AvlTreeIterator;
@@ -226,7 +233,111 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     return pollResult;
   }

-  @VisibleForTesting
+  private LockInfo createLockInfo(LockInfo.ResourceType resourceType,
+      String resourceName, LockAndQueue queue) {
+    LockInfo info = new LockInfo();
+
+    info.setResourceType(resourceType);
+    info.setResourceName(resourceName);
+
+    if (queue.hasExclusiveLock()) {
+      info.setLockType(LockInfo.LockType.EXCLUSIVE);
+
+      Procedure<?> exclusiveLockOwnerProcedure = queue.getExclusiveLockOwnerProcedure();
+      ProcedureInfo exclusiveLockOwnerProcedureInfo =
+          ProcedureUtil.convertToProcedureInfo(exclusiveLockOwnerProcedure);
+      info.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureInfo);
+    } else if (queue.getSharedLockCount() > 0) {
+      info.setLockType(LockInfo.LockType.SHARED);
+      info.setSharedLockCount(queue.getSharedLockCount());
+    }
+
+    for (Procedure<?> procedure : queue) {
+      if (!(procedure instanceof LockProcedure)) {
+        continue;
+      }
+
+      LockProcedure lockProcedure = (LockProcedure)procedure;
+      LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
+
+      switch (lockProcedure.getType()) {
+      case EXCLUSIVE:
+        waitingProcedure.setLockType(LockInfo.LockType.EXCLUSIVE);
+        break;
+      case SHARED:
+        waitingProcedure.setLockType(LockInfo.LockType.SHARED);
+        break;
+      }
+
+      ProcedureInfo procedureInfo = ProcedureUtil.convertToProcedureInfo(lockProcedure);
+      waitingProcedure.setProcedure(procedureInfo);
+
+      info.addWaitingProcedure(waitingProcedure);
+    }
+
+    return info;
+  }
+
+  @Override
+  public List<LockInfo> listLocks() {
+    schedLock();
+
+    try {
+      List<LockInfo> lockInfos = new ArrayList<>();
+
+      for (Entry<ServerName, LockAndQueue> entry : locking.serverLocks
+          .entrySet()) {
+        String serverName = entry.getKey().getServerName();
+        LockAndQueue queue = entry.getValue();
+
+        if (queue.isLocked()) {
+          LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.SERVER,
+              serverName, queue);
+          lockInfos.add(lockInfo);
+        }
+      }
+
+      for (Entry<String, LockAndQueue> entry : locking.namespaceLocks
+          .entrySet()) {
+        String namespaceName = entry.getKey();
+        LockAndQueue queue = entry.getValue();
+
+        if (queue.isLocked()) {
+          LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.NAMESPACE,
+              namespaceName, queue);
+          lockInfos.add(lockInfo);
+        }
+      }
+
+      for (Entry<TableName, LockAndQueue> entry : locking.tableLocks
+          .entrySet()) {
+        String tableName = entry.getKey().getNameAsString();
+        LockAndQueue queue = entry.getValue();
+
+        if (queue.isLocked()) {
+          LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.TABLE,
+              tableName, queue);
+          lockInfos.add(lockInfo);
+        }
+      }
+
+      for (Entry<String, LockAndQueue> entry : locking.regionLocks.entrySet()) {
+        String regionName = entry.getKey();
+        LockAndQueue queue = entry.getValue();
+
+        if (queue.isLocked()) {
+          LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.REGION,
+              regionName, queue);
+          lockInfos.add(lockInfo);
+        }
+      }
+
+      return lockInfos;
+    } finally {
+      schedUnlock();
+    }
+  }
+
   @Override
   public void clear() {
     schedLock();
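listLocks() takes its snapshot entirely under schedLock(), so the returned list is internally consistent, and resources are emitted grouped by type in a fixed order: servers, then namespaces, tables and regions. LockDumpFormatter below is a hypothetical helper, not part of the patch, that renders one such snapshot using only the LockInfo accessors introduced here:

import java.util.List;

import org.apache.hadoop.hbase.procedure2.LockInfo;

public final class LockDumpFormatter {
  public static String format(List<LockInfo> locks) {
    StringBuilder sb = new StringBuilder();
    for (LockInfo lock : locks) {
      sb.append(lock.getResourceType()).append(':').append(lock.getResourceName());
      if (lock.getLockType() == LockInfo.LockType.EXCLUSIVE) {
        // Exclusive locks carry their owner procedure.
        sb.append(" EXCLUSIVE, owner pid=")
            .append(lock.getExclusiveLockOwnerProcedure().getProcId());
      } else if (lock.getLockType() == LockInfo.LockType.SHARED) {
        // Shared locks only carry a holder count.
        sb.append(" SHARED x").append(lock.getSharedLockCount());
      }
      for (LockInfo.WaitingProcedure wp : lock.getWaitingProcedures()) {
        sb.append("\n  waiting pid=").append(wp.getProcedure().getProcId())
            .append(" wants ").append(wp.getLockType());
      }
      sb.append('\n');
    }
    return sb.toString();
  }
}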
@@ -390,6 +501,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       super(serverName, serverLock);
     }

+    @Override
     public boolean requireExclusiveLock(Procedure proc) {
       ServerProcedureInterface spi = (ServerProcedureInterface)proc;
       switch (spi.getServerOperationType()) {

@@ -437,6 +549,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return true;
     }

+    @Override
     public boolean requireExclusiveLock(Procedure proc) {
       return requireTableExclusiveLock((TableProcedureInterface)proc);
     }
@@ -27,9 +27,10 @@
   import="java.util.Set"
   import="org.apache.hadoop.conf.Configuration"
   import="org.apache.hadoop.hbase.HBaseConfiguration"
-  import="org.apache.hadoop.hbase.ProcedureInfo"
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv"
+  import="org.apache.hadoop.hbase.ProcedureInfo"
+  import="org.apache.hadoop.hbase.procedure2.LockInfo"
   import="org.apache.hadoop.hbase.procedure2.ProcedureExecutor"
   import="org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALFile"
   import="org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore"

@@ -55,6 +56,8 @@
       return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
     }
   });
+
+  List<LockInfo> locks = master.listLocks();
 %>
 <!--[if IE]>
 <!DOCTYPE html>
@@ -62,15 +65,15 @@
 <?xml version="1.0" encoding="UTF-8" ?>
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head>
-    <meta charset="utf-8">
+    <meta charset="utf-8" />
     <title>HBase Master Procedures: <%= master.getServerName() %></title>
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="description" content="">
+    <meta name="description" content="" />
-    <meta name="author" content="">
+    <meta name="author" content="" />

-    <link href="/static/css/bootstrap.min.css" rel="stylesheet">
+    <link href="/static/css/bootstrap.min.css" rel="stylesheet" />
-    <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet">
+    <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet" />
-    <link href="/static/css/hbase.css" rel="stylesheet">
+    <link href="/static/css/hbase.css" rel="stylesheet" />
 </head>
 <body>
 <div class="navbar navbar-fixed-top navbar-default">

@@ -87,7 +90,7 @@
         <ul class="nav navbar-nav">
           <li><a href="/master-status">Home</a></li>
           <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-          <li><a href="/procedures.jsp">Procedures</a></li>
+          <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
           <li><a href="/logs/">Local Logs</a></li>
           <li><a href="/logLevel">Log Level</a></li>
           <li><a href="/dump">Debug Dump</a></li>
@@ -116,43 +119,42 @@
           <th>Last Update</th>
           <th>Errors</th>
         </tr>
-        <tr>
 <% for (ProcedureInfo procInfo : procedures) { %>
         <tr>
-          <td><%= procInfo.getProcId() %></a></td>
+          <td><%= procInfo.getProcId() %></td>
-          <td><%= procInfo.hasParentId() ? procInfo.getParentId() : "" %></a></td>
+          <td><%= procInfo.hasParentId() ? procInfo.getParentId() : "" %></td>
-          <td><%= escapeXml(procInfo.getProcState().toString()) %></a></td>
+          <td><%= escapeXml(procInfo.getProcState().toString()) %></td>
-          <td><%= escapeXml(procInfo.getProcOwner()) %></a></td>
+          <td><%= escapeXml(procInfo.getProcOwner()) %></td>
-          <td><%= escapeXml(procInfo.getProcName()) %></a></td>
+          <td><%= escapeXml(procInfo.getProcName()) %></td>
-          <td><%= new Date(procInfo.getSubmittedTime()) %></a></td>
+          <td><%= new Date(procInfo.getSubmittedTime()) %></td>
-          <td><%= new Date(procInfo.getLastUpdate()) %></a></td>
+          <td><%= new Date(procInfo.getLastUpdate()) %></td>
-          <td><%= escapeXml(procInfo.isFailed() ? procInfo.getException().getMessage() : "") %></a></td>
+          <td><%= escapeXml(procInfo.isFailed() ? procInfo.getException().getMessage() : "") %></td>
         </tr>
 <% } %>
       </table>
     </div>
-    <br>
+    <br />
     <div class="container-fluid content">
       <div class="row">
         <div class="page-header">
           <h2>Procedure WAL State</h2>
         </div>
       </div>
       <div class="tabbable">
         <ul class="nav nav-pills">
           <li class="active">
             <a href="#tab_WALFiles" data-toggle="tab">WAL files</a>
           </li>
           <li class="">
             <a href="#tab_WALFilesCorrupted" data-toggle="tab">Corrupted WAL files</a>
           </li>
           <li class="">
             <a href="#tab_WALRollTime" data-toggle="tab">WAL roll time</a>
           </li>
           <li class="">
             <a href="#tab_SyncStats" data-toggle="tab">Sync stats</a>
           </li>
         </ul>
         <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
           <div class="tab-pane active" id="tab_WALFiles">
             <% if (procedureWALFiles != null && procedureWALFiles.size() > 0) { %>

@@ -168,8 +170,8 @@
             <tr>
               <td> <%= pwf.getLogId() %></td>
               <td> <%= StringUtils.humanSize(pwf.getSize()) %> </td>
-              <td> <%= new Date(pwf.getTimestamp()) %></a></td>
+              <td> <%= new Date(pwf.getTimestamp()) %> </td>
-              <td> <%= escapeXml(pwf.toString()) %></t>
+              <td> <%= escapeXml(pwf.toString()) %> </td>
             </tr>
             <% } %>
           </table>

@@ -190,8 +192,8 @@
             <tr>
               <td> <%= cwf.getLogId() %></td>
               <td> <%= StringUtils.humanSize(cwf.getSize()) %> </td>
-              <td> <%= new Date(cwf.getTimestamp()) %></a></td>
+              <td> <%= new Date(cwf.getTimestamp()) %> </td>
-              <td> <%= escapeXml(cwf.toString()) %></t>
+              <td> <%= escapeXml(cwf.toString()) %> </td>
             </tr>
             <% } %>
           </table>
@@ -223,7 +225,7 @@
           <% for (int i = syncMetricsBuff.size() - 1; i >= 0; --i) { %>
           <% WALProcedureStore.SyncMetrics syncMetrics = syncMetricsBuff.get(i); %>
           <tr>
-            <td> <%= new Date(syncMetrics.getTimestamp()) %></a></td>
+            <td> <%= new Date(syncMetrics.getTimestamp()) %></td>
             <td> <%= StringUtils.humanTimeDiff(syncMetrics.getSyncWaitMs()) %></td>
             <td> <%= syncMetrics.getSyncedEntries() %></td>
             <td> <%= StringUtils.humanSize(syncMetrics.getTotalSyncedBytes()) %></td>

@@ -235,6 +237,51 @@
     </div>
   </div>
 </div>
+<br />
+<div class="container-fluid content">
+  <div class="row">
+    <div class="page-header">
+      <h1>Locks</h1>
+    </div>
+  </div>
+  <% for (LockInfo lock : locks) { %>
+  <h2><%= lock.getResourceType() %>: <%= lock.getResourceName() %></h2>
+  <%
+    switch (lock.getLockType()) {
+    case EXCLUSIVE:
+  %>
+  <p>Lock type: EXCLUSIVE</p>
+  <p>Owner procedure ID: <%= lock.getExclusiveLockOwnerProcedure().getProcId() %></p>
+  <%
+      break;
+    case SHARED:
+  %>
+  <p>Lock type: SHARED</p>
+  <p>Number of shared locks: <%= lock.getSharedLockCount() %></p>
+  <%
+      break;
+    }
+
+    List<LockInfo.WaitingProcedure> waitingProcedures = lock.getWaitingProcedures();
+
+    if (!waitingProcedures.isEmpty()) {
+  %>
+  <h3>Waiting procedures</h3>
+  <table class="table table-striped" width="90%">
+    <tr>
+      <th>Lock type</th>
+      <th>Procedure ID</th>
+    </tr>
+    <% for (LockInfo.WaitingProcedure waitingProcedure : waitingProcedures) { %>
+    <tr>
+      <td><%= waitingProcedure.getLockType() %></td>
+      <td><%= waitingProcedure.getProcedure().getProcId() %></td>
+    </tr>
+    <% } %>
+  </table>
+  <% } %>
+  <% } %>
+</div>
 <script src="/static/js/jquery.min.js" type="text/javascript"></script>
 <script src="/static/js/bootstrap.min.js" type="text/javascript"></script>
@@ -94,7 +94,7 @@
         <ul class="nav navbar-nav">
           <li><a href="/master-status">Home</a></li>
           <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-          <li><a href="/procedures.jsp">Procedures</a></li>
+          <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
          <li><a href="/logs/">Local Logs</a></li>
           <li><a href="/logLevel">Log Level</a></li>
           <li><a href="/dump">Debug Dump</a></li>

@@ -81,7 +81,7 @@
         <ul class="nav navbar-nav">
           <li><a href="/master-status">Home</a></li>
           <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-          <li><a href="/procedures.jsp">Procedures</a></li>
+          <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
           <li><a href="/logs/">Local Logs</a></li>
           <li><a href="/logLevel">Log Level</a></li>
           <li><a href="/dump">Debug Dump</a></li>

@@ -158,7 +158,7 @@
         <ul class="nav navbar-nav">
           <li><a href="/master-status">Home</a></li>
           <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-          <li><a href="/procedures.jsp">Procedures</a></li>
+          <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
           <li><a href="/logs/">Local Logs</a></li>
           <li><a href="/logLevel">Log Level</a></li>
           <li><a href="/dump">Debug Dump</a></li>

@@ -64,7 +64,7 @@
         <ul class="nav navbar-nav">
           <li class="active"><a href="/master-status">Home</a></li>
           <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-          <li><a href="/procedures.jsp">Procedures</a></li>
+          <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
           <li><a href="/logs/">Local Logs</a></li>
           <li><a href="/logLevel">Log Level</a></li>
           <li><a href="/dump">Debug Dump</a></li>

@@ -60,7 +60,7 @@
         <ul class="nav navbar-nav">
           <li><a href="/master-status">Home</a></li>
           <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-          <li><a href="/procedures.jsp">Procedures</a></li>
+          <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
           <li><a href="/logs/">Local Logs</a></li>
           <li><a href="/logLevel">Log Level</a></li>
           <li><a href="/dump">Debug Dump</a></li>
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;

@@ -125,6 +126,8 @@ public class TestMasterObserver {
   private boolean postAbortProcedureCalled;
   private boolean preListProceduresCalled;
   private boolean postListProceduresCalled;
+  private boolean preListLocksCalled;
+  private boolean postListLocksCalled;
   private boolean preMoveCalled;
   private boolean postMoveCalled;
   private boolean preAssignCalled;
@@ -725,6 +728,25 @@ public class TestMasterObserver {
     return preListProceduresCalled && !postListProceduresCalled;
   }

+  @Override
+  public void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+    preListLocksCalled = true;
+  }
+
+  @Override
+  public void postListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx, List<LockInfo> lockInfoList)
+      throws IOException {
+    postListLocksCalled = true;
+  }
+
+  public boolean wasListLocksCalled() {
+    return preListLocksCalled && postListLocksCalled;
+  }
+
+  public boolean wasPreListLocksCalledOnly() {
+    return preListLocksCalled && !postListLocksCalled;
+  }
+
   @Override
   public void preMove(ObserverContext<MasterCoprocessorEnvironment> env,
       HRegionInfo region, ServerName srcServer, ServerName destServer)
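The flags above exercise two new MasterObserver hooks, preListLocks and postListLocks. A third-party coprocessor can observe lock listings the same way; the auditing observer below is a hypothetical example, assuming BaseMasterObserver in this code line provides no-op defaults for the new hooks:

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.procedure2.LockInfo;

public class LockAuditObserver extends BaseMasterObserver {
  private static final Log LOG = LogFactory.getLog(LockAuditObserver.class);

  @Override
  public void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
    LOG.info("listLocks requested");
  }

  @Override
  public void postListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx,
      List<LockInfo> lockInfoList) throws IOException {
    LOG.info("listLocks returned " + lockInfoList.size() + " lock(s)");
  }
}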
@@ -2164,6 +2186,22 @@ public class TestMasterObserver {
         cp.wasListProceduresCalled());
   }

+  @Test (timeout=180000)
+  public void testListLocksOperation() throws Exception {
+    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
+
+    HMaster master = cluster.getMaster();
+    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
+    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
+        CPMasterObserver.class.getName());
+    cp.resetStates();
+
+    master.listLocks();
+    assertTrue(
+      "Coprocessor should be called on list locks request",
+      cp.wasListLocksCalled());
+  }
+
   private void deleteTable(Admin admin, TableName tableName) throws Exception {
     // NOTE: We need a latch because admin is not sync,
     // so the postOp coprocessor method may be called after the admin operation returned.
@@ -39,16 +39,14 @@ import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.mockito.Mockito;

 import com.google.protobuf.Service;

 public class MockNoopMasterServices implements MasterServices, Server {

@@ -220,6 +218,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
     return null; //To change body of implemented methods use File | Settings | File Templates.
   }

+  @Override
+  public List<LockInfo> listLocks() throws IOException {
+    return null;
+  }
+
   @Override
   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
     return null; //To change body of implemented methods use File | Settings | File Templates.
@@ -18,21 +18,24 @@

 package org.apache.hadoop.hbase.master.procedure;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;

 import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
 import java.util.Arrays;
+import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.LockInfo.WaitingProcedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@ -899,5 +902,161 @@ public class TestMasterProcedureScheduler {
|
||||||
sb.append(")");
|
sb.append(")");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static LockProcedure createLockProcedure(LockProcedure.LockType lockType, long procId) throws Exception {
|
||||||
|
LockProcedure procedure = new LockProcedure();
|
||||||
|
|
||||||
|
Field typeField = LockProcedure.class.getDeclaredField("type");
|
||||||
|
typeField.setAccessible(true);
|
||||||
|
typeField.set(procedure, lockType);
|
||||||
|
|
||||||
|
Method setProcIdMethod = Procedure.class.getDeclaredMethod("setProcId", long.class);
|
||||||
|
setProcIdMethod.setAccessible(true);
|
||||||
|
setProcIdMethod.invoke(procedure, procId);
|
||||||
|
|
||||||
|
return procedure;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static LockProcedure createExclusiveLockProcedure(long procId) throws Exception {
|
||||||
|
return createLockProcedure(LockProcedure.LockType.EXCLUSIVE, procId);
|
||||||
|
}
|
||||||
|
|
||||||
|
private static LockProcedure createSharedLockProcedure(long procId) throws Exception {
|
||||||
|
return createLockProcedure(LockProcedure.LockType.SHARED, procId);
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void assertLockResource(LockInfo lock,
|
||||||
|
LockInfo.ResourceType resourceType, String resourceName)
|
||||||
|
{
|
||||||
|
assertEquals(resourceType, lock.getResourceType());
|
||||||
|
assertEquals(resourceName, lock.getResourceName());
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void assertExclusiveLock(LockInfo lock, long procId)
|
||||||
|
{
|
||||||
|
assertEquals(LockInfo.LockType.EXCLUSIVE, lock.getLockType());
|
||||||
|
assertEquals(procId, lock.getExclusiveLockOwnerProcedure().getProcId());
|
||||||
|
assertEquals(0, lock.getSharedLockCount());
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void assertSharedLock(LockInfo lock, int lockCount)
|
||||||
|
{
|
||||||
|
assertEquals(LockInfo.LockType.SHARED, lock.getLockType());
|
||||||
|
assertEquals(lockCount, lock.getSharedLockCount());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testListLocksServer() throws Exception {
|
||||||
|
LockProcedure procedure = createExclusiveLockProcedure(0);
|
||||||
|
queue.waitServerExclusiveLock(procedure, ServerName.valueOf("server1,1234,0"));
|
||||||
|
|
||||||
|
List<LockInfo> locks = queue.listLocks();
|
||||||
|
assertEquals(1, locks.size());
|
||||||
|
|
||||||
|
LockInfo serverLock = locks.get(0);
|
||||||
|
assertLockResource(serverLock, LockInfo.ResourceType.SERVER, "server1,1234,0");
|
||||||
|
assertExclusiveLock(serverLock, 0);
|
||||||
|
assertTrue(serverLock.getWaitingProcedures().isEmpty());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
  public void testListLocksNamespace() throws Exception {
    LockProcedure procedure = createExclusiveLockProcedure(1);
    queue.waitNamespaceExclusiveLock(procedure, "ns1");

    List<LockInfo> locks = queue.listLocks();
    assertEquals(2, locks.size());

    LockInfo namespaceLock = locks.get(0);
    assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns1");
    assertExclusiveLock(namespaceLock, 1);
    assertTrue(namespaceLock.getWaitingProcedures().isEmpty());

    LockInfo tableLock = locks.get(1);
    assertLockResource(tableLock, LockInfo.ResourceType.TABLE,
        TableName.NAMESPACE_TABLE_NAME.getNameAsString());
    assertSharedLock(tableLock, 1);
    assertTrue(tableLock.getWaitingProcedures().isEmpty());
  }

  @Test
  public void testListLocksTable() throws Exception {
    LockProcedure procedure = createExclusiveLockProcedure(2);
    queue.waitTableExclusiveLock(procedure, TableName.valueOf("ns2", "table2"));

    List<LockInfo> locks = queue.listLocks();
    assertEquals(2, locks.size());

    LockInfo namespaceLock = locks.get(0);
    assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns2");
    assertSharedLock(namespaceLock, 1);
    assertTrue(namespaceLock.getWaitingProcedures().isEmpty());

    LockInfo tableLock = locks.get(1);
    assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns2:table2");
    assertExclusiveLock(tableLock, 2);
    assertTrue(tableLock.getWaitingProcedures().isEmpty());
  }

  @Test
  public void testListLocksRegion() throws Exception {
    LockProcedure procedure = createExclusiveLockProcedure(3);
    HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf("ns3", "table3"));

    queue.waitRegion(procedure, regionInfo);

    List<LockInfo> locks = queue.listLocks();
    assertEquals(3, locks.size());

    LockInfo namespaceLock = locks.get(0);
    assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns3");
    assertSharedLock(namespaceLock, 1);
    assertTrue(namespaceLock.getWaitingProcedures().isEmpty());

    LockInfo tableLock = locks.get(1);
    assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns3:table3");
    assertSharedLock(tableLock, 1);
    assertTrue(tableLock.getWaitingProcedures().isEmpty());

    LockInfo regionLock = locks.get(2);
    assertLockResource(regionLock, LockInfo.ResourceType.REGION, regionInfo.getEncodedName());
    assertExclusiveLock(regionLock, 3);
    assertTrue(regionLock.getWaitingProcedures().isEmpty());
  }

  @Test
  public void testListLocksWaiting() throws Exception {
    LockProcedure procedure1 = createExclusiveLockProcedure(1);
    queue.waitTableExclusiveLock(procedure1, TableName.valueOf("ns4", "table4"));

    LockProcedure procedure2 = createSharedLockProcedure(2);
    queue.waitTableSharedLock(procedure2, TableName.valueOf("ns4", "table4"));

    LockProcedure procedure3 = createExclusiveLockProcedure(3);
    queue.waitTableExclusiveLock(procedure3, TableName.valueOf("ns4", "table4"));

    List<LockInfo> locks = queue.listLocks();
    assertEquals(2, locks.size());

    LockInfo namespaceLock = locks.get(0);
    assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns4");
    assertSharedLock(namespaceLock, 1);
    assertTrue(namespaceLock.getWaitingProcedures().isEmpty());

    LockInfo tableLock = locks.get(1);
    assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns4:table4");
    assertExclusiveLock(tableLock, 1);

    List<WaitingProcedure> waitingProcedures = tableLock.getWaitingProcedures();
    assertEquals(2, waitingProcedures.size());

    WaitingProcedure waitingProcedure1 = waitingProcedures.get(0);
    assertEquals(LockInfo.LockType.SHARED, waitingProcedure1.getLockType());
    assertEquals(2, waitingProcedure1.getProcedure().getProcId());

    WaitingProcedure waitingProcedure2 = waitingProcedures.get(1);
    assertEquals(LockInfo.LockType.EXCLUSIVE, waitingProcedure2.getLockType());
    assertEquals(3, waitingProcedure2.getProcedure().getProcId());
  }
}
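For orientation, here is a minimal client-side sketch of how the lock listing exercised by these tests could be pulled through the new Admin#listLocks() API. This example is not part of the patch: the DumpLocks class name is made up, configuration discovery and error handling are elided, and the print format just mirrors the list_locks shell command added later in this change.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.procedure2.LockInfo;

public class DumpLocks {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml on the classpath points at a running cluster.
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = connection.getAdmin()) {
      for (LockInfo lock : admin.listLocks()) {
        // Same resource summary the shell command prints, e.g. TABLE(ns1:table1).
        System.out.println(lock.getResourceType() + "(" + lock.getResourceName() + ")");
        if (lock.getLockType() == LockInfo.LockType.EXCLUSIVE) {
          System.out.println("Lock type: EXCLUSIVE, procedure: "
              + lock.getExclusiveLockOwnerProcedure().getProcId());
        } else {
          System.out.println("Lock type: SHARED, count: " + lock.getSharedLockCount());
        }
      }
    }
  }
}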
@ -20,21 +20,24 @@ package org.apache.hadoop.hbase.protobuf;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.ProcedureState;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
@ -336,4 +339,40 @@ public class TestProtobufUtil {
    Cell newOffheapKV = org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toCell(cell);
    assertTrue(CellComparator.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
  }

  private static ProcedureInfo createProcedureInfo(long procId) {
    return new ProcedureInfo(procId, "java.lang.Object", null,
        ProcedureState.RUNNABLE, -1, null, null, 0, 0, null);
  }

  private static void assertProcedureInfoEquals(ProcedureInfo expected,
      ProcedureInfo result) {
    if (expected == result) {
      return;
    } else if (expected == null || result == null) {
      fail();
    }

    assertEquals(expected.getProcId(), result.getProcId());
  }

  private static void assertLockInfoEquals(LockInfo expected, LockInfo result) {
    assertEquals(expected.getResourceType(), result.getResourceType());
    assertEquals(expected.getResourceName(), result.getResourceName());
    assertEquals(expected.getLockType(), result.getLockType());
    assertProcedureInfoEquals(expected.getExclusiveLockOwnerProcedure(),
        result.getExclusiveLockOwnerProcedure());
    assertEquals(expected.getSharedLockCount(), result.getSharedLockCount());
  }

  private static void assertWaitingProcedureEquals(
      LockInfo.WaitingProcedure expected, LockInfo.WaitingProcedure result) {
    assertEquals(expected.getLockType(), result.getLockType());
    assertProcedureInfoEquals(expected.getProcedure(),
        result.getProcedure());
  }
}
@ -0,0 +1,151 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.shaded.protobuf;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.ProcedureState;
import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(SmallTests.class)
public class TestProtobufUtil {
  public TestProtobufUtil() {
  }

  private static ProcedureInfo createProcedureInfo(long procId) {
    return new ProcedureInfo(procId, "java.lang.Object", null,
        ProcedureState.RUNNABLE, -1, null, null, 0, 0, null);
  }

  private static void assertProcedureInfoEquals(ProcedureInfo expected,
      ProcedureInfo result) {
    if (expected == result) {
      return;
    } else if (expected == null || result == null) {
      fail();
    }

    assertEquals(expected.getProcId(), result.getProcId());
  }

  private static void assertLockInfoEquals(LockInfo expected, LockInfo result) {
    assertEquals(expected.getResourceType(), result.getResourceType());
    assertEquals(expected.getResourceName(), result.getResourceName());
    assertEquals(expected.getLockType(), result.getLockType());
    assertProcedureInfoEquals(expected.getExclusiveLockOwnerProcedure(),
        result.getExclusiveLockOwnerProcedure());
    assertEquals(expected.getSharedLockCount(), result.getSharedLockCount());
  }

  private static void assertWaitingProcedureEquals(
      LockInfo.WaitingProcedure expected, LockInfo.WaitingProcedure result) {
    assertEquals(expected.getLockType(), result.getLockType());
    assertProcedureInfoEquals(expected.getProcedure(),
        result.getProcedure());
  }

  @Test
  public void testServerLockInfo() {
    LockInfo lock = new LockInfo();
    lock.setResourceType(LockInfo.ResourceType.SERVER);
    lock.setResourceName("server");
    lock.setLockType(LockInfo.LockType.SHARED);
    lock.setSharedLockCount(2);

    LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
    LockInfo lock2 = ProtobufUtil.toLockInfo(proto);

    assertLockInfoEquals(lock, lock2);
  }

  @Test
  public void testNamespaceLockInfo() {
    LockInfo lock = new LockInfo();
    lock.setResourceType(LockInfo.ResourceType.NAMESPACE);
    lock.setResourceName("ns");
    lock.setLockType(LockInfo.LockType.EXCLUSIVE);
    lock.setExclusiveLockOwnerProcedure(createProcedureInfo(2));

    LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
    LockInfo lock2 = ProtobufUtil.toLockInfo(proto);

    assertLockInfoEquals(lock, lock2);
  }

  @Test
  public void testTableLockInfo() {
    LockInfo lock = new LockInfo();
    lock.setResourceType(LockInfo.ResourceType.TABLE);
    lock.setResourceName("table");
    lock.setLockType(LockInfo.LockType.SHARED);
    lock.setSharedLockCount(2);

    LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
    LockInfo lock2 = ProtobufUtil.toLockInfo(proto);

    assertLockInfoEquals(lock, lock2);
  }

  @Test
  public void testRegionLockInfo() {
    LockInfo lock = new LockInfo();
    lock.setResourceType(LockInfo.ResourceType.REGION);
    lock.setResourceName("region");
    lock.setLockType(LockInfo.LockType.EXCLUSIVE);
    lock.setExclusiveLockOwnerProcedure(createProcedureInfo(2));

    LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
    LockInfo lock2 = ProtobufUtil.toLockInfo(proto);

    assertLockInfoEquals(lock, lock2);
  }

  @Test
  public void testExclusiveWaitingLockInfo() {
    LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
    waitingProcedure.setLockType(LockInfo.LockType.EXCLUSIVE);
    waitingProcedure.setProcedure(createProcedureInfo(1));

    LockServiceProtos.WaitingProcedure proto = ProtobufUtil.toProtoWaitingProcedure(waitingProcedure);
    LockInfo.WaitingProcedure waitingProcedure2 = ProtobufUtil.toWaitingProcedure(proto);

    assertWaitingProcedureEquals(waitingProcedure, waitingProcedure2);
  }

  @Test
  public void testSharedWaitingLockInfo() {
    LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
    waitingProcedure.setLockType(LockInfo.LockType.SHARED);
    waitingProcedure.setProcedure(createProcedureInfo(2));

    LockServiceProtos.WaitingProcedure proto = ProtobufUtil.toProtoWaitingProcedure(waitingProcedure);
    LockInfo.WaitingProcedure waitingProcedure2 = ProtobufUtil.toWaitingProcedure(proto);

    assertWaitingProcedureEquals(waitingProcedure, waitingProcedure2);
  }
}
@ -1192,6 +1192,11 @@ module Hbase
      @admin.listProcedures()
    end

    # List all locks
    def list_locks()
      @admin.listLocks
    end

    # Parse arguments and update HTableDescriptor accordingly
    def update_htd_from_arg(htd, arg)
      htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
@ -436,10 +436,11 @@ Shell.load_command_group(

Shell.load_command_group(
  'procedures',
  :full_name => 'PROCEDURES & LOCKS MANAGEMENT',
  :commands => %w[
    abort_procedure
    list_procedures
    list_locks
  ]
)
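Once registered in the group above, the command resolves from an interactive shell session like any other; a hypothetical session (actual output depends on which procedures currently hold locks):

  hbase> help 'list_locks'
  hbase> list_locks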
@ -98,6 +98,11 @@ module Shell
      @formatter ||= ::Shell::Formatter::Console.new
    end

    # for testing purposes to catch the output of the commands
    def set_formatter(formatter)
      @formatter = formatter
    end

    def translate_hbase_exceptions(*args)
      yield
    rescue => e
@ -0,0 +1,60 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

module Shell
  module Commands
    class ListLocks < Command
      def help
        return <<-EOF
List all locks in hbase. Examples:

  hbase> list_locks
EOF
      end

      def command()
        list = admin.list_locks()

        list.each do |lock|
          formatter.output_strln("#{lock.resourceType}(#{lock.resourceName})")

          case lock.lockType
          when org.apache.hadoop.hbase.procedure2.LockInfo::LockType::EXCLUSIVE then
            formatter.output_strln("Lock type: EXCLUSIVE, procedure: #{lock.exclusiveLockOwnerProcedure.procId}")
          when org.apache.hadoop.hbase.procedure2.LockInfo::LockType::SHARED then
            formatter.output_strln("Lock type: SHARED, count: #{lock.sharedLockCount}")
          end

          if lock.waitingProcedures.any?
            formatter.output_strln("Waiting procedures:")
            formatter.header([ "Lock type", "Procedure Id" ])

            lock.waitingProcedures.each do |waitingProcedure|
              formatter.row([ waitingProcedure.lockType.to_s, waitingProcedure.procedure.procId.to_s ])
            end

            formatter.footer(lock.waitingProcedures.size)
          end

          formatter.output_strln("")
        end
      end
    end
  end
end
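As a sketch of this command's output, an exclusively held table lock with one shared waiter would print roughly the following (the resource name and procedure ids are hypothetical; the layout follows the formatter calls above):

  hbase> list_locks
  TABLE(ns1:table1)
  Lock type: EXCLUSIVE, procedure: 42
  Waiting procedures:
  Lock type Procedure Id
   SHARED 43
  1 row(s)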
@ -17,6 +17,8 @@
# limitations under the License.
#

require 'stringio'

# Results formatter
module Shell
  module Formatter
@ -25,7 +27,7 @@ module Shell
      attr_reader :row_count

      def is_valid_io?(obj)
        obj.instance_of?(IO) || obj.instance_of?(StringIO) || obj == Kernel
      end

      def refresh_width()
@ -166,6 +168,11 @@ module Shell
        output(@max_width, str)
      end

      def output_strln(str)
        output_str(str)
        @out.puts
      end

      def output(width, str)
        if str == nil
          str = ''
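The StringIO support in is_valid_io? and the new output_strln hook exist so tests can capture command output in memory instead of writing to the console. A minimal sketch of that capture pattern, using the same constructor arguments as the test file that follows:

require 'stringio'

string_io = StringIO.new
formatter = Shell::Formatter::Base.new({ :output_stream => string_io })
formatter.output_strln("TABLE(ns1:table1)")

string_io.string  # => "TABLE(ns1:table1)\n"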
@ -0,0 +1,152 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

require 'hbase_constants'
require 'shell'

class ListLocksTest < Test::Unit::TestCase
  def setup
    @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
    @shell = Shell::Shell.new(@hbase)
    @master = $TEST_CLUSTER.getHBaseClusterInterface.getMaster
    @scheduler = @master.getMasterProcedureExecutor.getEnvironment.getProcedureScheduler

    @string_io = StringIO.new

    @list_locks = Shell::Commands::ListLocks.new(@shell)
    @list_locks.set_formatter(Shell::Formatter::Base.new({ :output_stream => @string_io }))
  end

  def set_field(object, field_name, value)
    field = object.getClass.getDeclaredField(field_name)
    field.setAccessible(true)
    field.set(object, value)
  end

  def create_lock(type, proc_id)
    lock = org.apache.hadoop.hbase.master.locking.LockProcedure.new()
    set_field(lock, "type", type)
    lock.procId = proc_id

    return lock
  end

  def create_exclusive_lock(proc_id)
    return create_lock(org.apache.hadoop.hbase.master.locking.LockProcedure::LockType::EXCLUSIVE, proc_id)
  end

  def create_shared_lock(proc_id)
    return create_lock(org.apache.hadoop.hbase.master.locking.LockProcedure::LockType::SHARED, proc_id)
  end

  define_test "list server locks" do
    lock = create_exclusive_lock(0)

    server_name = org.apache.hadoop.hbase.ServerName.valueOf("server1,1234,0")

    @scheduler.waitServerExclusiveLock(lock, server_name)
    @list_locks.command()
    @scheduler.wakeServerExclusiveLock(lock, server_name)

    assert_equal(
      "SERVER(server1,1234,0)\n" <<
      "Lock type: EXCLUSIVE, procedure: 0\n\n",
      @string_io.string)
  end

  define_test "list namespace locks" do
    lock = create_exclusive_lock(1)

    @scheduler.waitNamespaceExclusiveLock(lock, "ns1")
    @list_locks.command()
    @scheduler.wakeNamespaceExclusiveLock(lock, "ns1")

    assert_equal(
      "NAMESPACE(ns1)\n" <<
      "Lock type: EXCLUSIVE, procedure: 1\n\n" <<
      "TABLE(hbase:namespace)\n" <<
      "Lock type: SHARED, count: 1\n\n",
      @string_io.string)
  end

  define_test "list table locks" do
    lock = create_exclusive_lock(2)

    table_name = org.apache.hadoop.hbase.TableName.valueOf("ns2", "table2")

    @scheduler.waitTableExclusiveLock(lock, table_name)
    @list_locks.command()
    @scheduler.wakeTableExclusiveLock(lock, table_name)

    assert_equal(
      "NAMESPACE(ns2)\n" <<
      "Lock type: SHARED, count: 1\n\n" <<
      "TABLE(ns2:table2)\n" <<
      "Lock type: EXCLUSIVE, procedure: 2\n\n",
      @string_io.string)
  end

  define_test "list region locks" do
    lock = create_exclusive_lock(3)

    table_name = org.apache.hadoop.hbase.TableName.valueOf("ns3", "table3")
    region_info = org.apache.hadoop.hbase.HRegionInfo.new(table_name)

    @scheduler.waitRegion(lock, region_info)
    @list_locks.command()
    @scheduler.wakeRegion(lock, region_info)

    assert_equal(
      "NAMESPACE(ns3)\n" <<
      "Lock type: SHARED, count: 1\n\n" <<
      "TABLE(ns3:table3)\n" <<
      "Lock type: SHARED, count: 1\n\n" <<
      "REGION(" << region_info.getEncodedName << ")\n" <<
      "Lock type: EXCLUSIVE, procedure: 3\n\n",
      @string_io.string)
  end

  define_test "list waiting locks" do
    table_name = org.apache.hadoop.hbase.TableName.valueOf("ns4", "table4")

    lock1 = create_exclusive_lock(1)
    set_field(lock1, "tableName", table_name)

    lock2 = create_shared_lock(2)
    set_field(lock2, "tableName", table_name)

    @scheduler.waitTableExclusiveLock(lock1, table_name)
    @scheduler.waitTableSharedLock(lock2, table_name)
    @list_locks.command()
    @scheduler.wakeTableExclusiveLock(lock1, table_name)
    @scheduler.wakeTableSharedLock(lock2, table_name)

    assert_equal(
      "NAMESPACE(ns4)\n" <<
      "Lock type: SHARED, count: 1\n\n" <<
      "TABLE(ns4:table4)\n" <<
      "Lock type: EXCLUSIVE, procedure: 1\n" <<
      "Waiting procedures:\n" <<
      "Lock type Procedure Id\n" <<
      " SHARED 2\n" <<
      "1 row(s)\n\n",
      @string_io.string)
  end
end