HBASE-19131 Add the ClusterStatus hook and cleanup other hooks which can be replaced by ClusterStatus hook

Author: Chia-Ping Tsai
Date:   2017-11-05 09:56:04 +08:00
Parent: 349dd5e214
Commit: 2085958216

21 changed files with 163 additions and 139 deletions


@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase;

+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -74,9 +75,9 @@ public class ClusterStatus {
   private String hbaseVersion;
   private Map<ServerName, ServerLoad> liveServers;
-  private Collection<ServerName> deadServers;
+  private List<ServerName> deadServers;
   private ServerName master;
-  private Collection<ServerName> backupMasters;
+  private List<ServerName> backupMasters;
   private List<RegionState> intransition;
   private String clusterId;
   private String[] masterCoprocessors;
@@ -101,6 +102,27 @@ public class ClusterStatus {
     // TODO: make this constructor private
     this.hbaseVersion = hbaseVersion;
     this.liveServers = servers;
+    this.deadServers = new ArrayList<>(deadServers);
+    this.master = master;
+    this.backupMasters = new ArrayList<>(backupMasters);
+    this.intransition = rit;
+    this.clusterId = clusterid;
+    this.masterCoprocessors = masterCoprocessors;
+    this.balancerOn = balancerOn;
+    this.masterInfoPort = masterInfoPort;
+  }
+
+  private ClusterStatus(final String hbaseVersion, final String clusterid,
+      final Map<ServerName, ServerLoad> servers,
+      final List<ServerName> deadServers,
+      final ServerName master,
+      final List<ServerName> backupMasters,
+      final List<RegionState> rit,
+      final String[] masterCoprocessors,
+      final Boolean balancerOn,
+      final int masterInfoPort) {
+    this.hbaseVersion = hbaseVersion;
+    this.liveServers = servers;
     this.deadServers = deadServers;
     this.master = master;
     this.backupMasters = backupMasters;
@@ -114,11 +136,11 @@ public class ClusterStatus {
   /**
    * @return the names of region servers on the dead list
    */
-  public Collection<ServerName> getDeadServerNames() {
+  public List<ServerName> getDeadServerNames() {
     if (deadServers == null) {
-      return Collections.<ServerName>emptyList();
+      return Collections.EMPTY_LIST;
     }
-    return Collections.unmodifiableCollection(deadServers);
+    return Collections.unmodifiableList(deadServers);
   }

   /**
@@ -234,7 +256,7 @@ public class ClusterStatus {
   public Collection<ServerName> getServers() {
     if (liveServers == null) {
-      return Collections.<ServerName>emptyList();
+      return Collections.EMPTY_LIST;
     }
     return Collections.unmodifiableCollection(this.liveServers.keySet());
   }
@@ -257,11 +279,11 @@ public class ClusterStatus {
   /**
    * @return the names of backup masters
    */
-  public Collection<ServerName> getBackupMasters() {
+  public List<ServerName> getBackupMasters() {
     if (backupMasters == null) {
-      return Collections.<ServerName>emptyList();
+      return Collections.EMPTY_LIST;
     }
-    return Collections.unmodifiableCollection(this.backupMasters);
+    return Collections.unmodifiableList(this.backupMasters);
   }

   /**
@@ -363,6 +385,7 @@ public class ClusterStatus {
     return sb.toString();
   }

+  @InterfaceAudience.Private
   public static Builder newBuilder() {
     return new Builder();
   }
@@ -374,9 +397,9 @@ public class ClusterStatus {
   public static class Builder {
     private String hbaseVersion = null;
     private Map<ServerName, ServerLoad> liveServers = null;
-    private Collection<ServerName> deadServers = null;
+    private List<ServerName> deadServers = null;
     private ServerName master = null;
-    private Collection<ServerName> backupMasters = null;
+    private List<ServerName> backupMasters = null;
     private List<RegionState> intransition = null;
     private String clusterId = null;
     private String[] masterCoprocessors = null;
@@ -395,7 +418,7 @@ public class ClusterStatus {
       return this;
     }
-    public Builder setDeadServers(Collection<ServerName> deadServers) {
+    public Builder setDeadServers(List<ServerName> deadServers) {
       this.deadServers = deadServers;
       return this;
     }
@@ -405,7 +428,7 @@ public class ClusterStatus {
       return this;
     }
-    public Builder setBackupMasters(Collection<ServerName> backupMasters) {
+    public Builder setBackupMasters(List<ServerName> backupMasters) {
       this.backupMasters = backupMasters;
       return this;
     }
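
Note on the type tightening above: getDeadServerNames() and getBackupMasters() now return an ordered, index-addressable (and still unmodifiable) List rather than a bare Collection. A minimal client-side sketch of what that allows, assuming an already-open Connection named conn (hypothetical) and the EnumSet-taking getClusterStatus overload shown in the Admin diff below:

import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DeadServerReport {
  // Fetch only the dead-server slice of ClusterStatus and walk it by index,
  // which the List-typed accessor now makes part of the contract.
  static void printDeadServers(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.DEAD_SERVERS));
      List<ServerName> dead = status.getDeadServerNames(); // unmodifiable List
      for (int i = 0; i < dead.size(); i++) {
        System.out.println(i + ": " + dead.get(i).getServerName());
      }
    }
  }
}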


@@ -2486,7 +2486,9 @@ public interface Admin extends Abortable, Closeable {
    * List dead region servers.
    * @return List of dead region servers.
    */
-  List<ServerName> listDeadServers() throws IOException;
+  default List<ServerName> listDeadServers() throws IOException {
+    return getClusterStatus(EnumSet.of(Option.DEAD_SERVERS)).getDeadServerNames();
+  }

   /**
    * Clear dead region servers from master.


@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;

 import com.google.protobuf.RpcChannel;
@@ -1080,9 +1079,11 @@ public interface AsyncAdmin {
   /**
    * List all the dead region servers.
+   * @return - returns a list of dead region servers wrapped by a {@link CompletableFuture}.
    */
-  CompletableFuture<List<ServerName>> listDeadServers();
+  default CompletableFuture<List<ServerName>> listDeadServers() {
+    return this.getClusterStatus(EnumSet.of(Option.DEAD_SERVERS))
+        .thenApply(ClusterStatus::getDeadServerNames);
+  }

   /**
    * Clear dead region servers from master.
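
Since the default implementation above is just a projection over getClusterStatus, the returned CompletableFuture composes like any other. A hedged usage sketch, assuming an AsyncAdmin instance named asyncAdmin (hypothetical) is already in scope:

import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class AsyncDeadServerReport {
  // Non-blocking: the dead-server list arrives on the future's completion.
  static void report(AsyncAdmin asyncAdmin) {
    asyncAdmin.listDeadServers().whenComplete((List<ServerName> dead, Throwable err) -> {
      if (err != null) {
        err.printStackTrace(); // RPC failure or a coprocessor veto on the master
      } else {
        System.out.println("dead servers: " + dead.size());
      }
    });
  }
}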


@@ -1777,12 +1777,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       return stub.getQuotaStates(controller, request);
     }

-    @Override
-    public MasterProtos.ListDeadServersResponse listDeadServers(RpcController controller,
-        MasterProtos.ListDeadServersRequest request) throws ServiceException {
-      return stub.listDeadServers(controller, request);
-    }
-
     @Override
     public MasterProtos.ClearDeadServersResponse clearDeadServers(RpcController controller,
         MasterProtos.ClearDeadServersRequest request) throws ServiceException {


@@ -170,7 +170,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDeadServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
@@ -2100,7 +2099,7 @@ public class HBaseAdmin implements Admin {
   }

   /**
-   * Do a get with a timeout against the passed in <code>future<code>.
+   * Do a get with a timeout against the passed in <code>future</code>.
    */
   private static <T> T get(final Future<T> future, final long timeout, final TimeUnit units)
       throws IOException {
@@ -4346,19 +4345,6 @@ public class HBaseAdmin implements Admin {
     ProtobufUtil.call(callable);
   }

-  @Override
-  public List<ServerName> listDeadServers() throws IOException {
-    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-        getRpcControllerFactory()) {
-      @Override
-      public List<ServerName> rpcCall() throws ServiceException {
-        ListDeadServersRequest req = ListDeadServersRequest.newBuilder().build();
-        return ProtobufUtil.toServerNameList(
-            master.listDeadServers(getRpcController(), req).getServerNameList());
-      }
-    });
-  }
-
   @Override
   public List<ServerName> clearDeadServers(final List<ServerName> servers) throws IOException {
     if (servers == null || servers.size() == 0) {


@@ -156,8 +156,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
@@ -178,8 +176,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDeadServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDeadServersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
@@ -2837,17 +2833,6 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
     return future;
   }

-  @Override
-  public CompletableFuture<List<ServerName>> listDeadServers() {
-    return this.<List<ServerName>> newMasterCaller()
-        .action((controller, stub) -> this
-            .<ListDeadServersRequest, ListDeadServersResponse, List<ServerName>> call(
-                controller, stub, ListDeadServersRequest.newBuilder().build(),
-                (s, c, req, done) -> s.listDeadServers(c, req, done),
-                (resp) -> ProtobufUtil.toServerNameList(resp.getServerNameList())))
-        .call();
-  }
-
   @Override
   public CompletableFuture<List<ServerName>> clearDeadServers(List<ServerName> servers) {
     return this.<List<ServerName>> newMasterCaller()


@@ -92,8 +92,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDeadServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDeadServersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
@@ -635,12 +633,6 @@ public class ShortCircuitMasterConnection implements MasterKeepAliveConnection {
     return stub.clearDeadServers(controller, request);
   }

-  @Override
-  public ListDeadServersResponse listDeadServers(RpcController controller,
-      ListDeadServersRequest request) throws ServiceException {
-    return stub.listDeadServers(controller, request);
-  }
-
   @Override
   public SplitTableRegionResponse splitRegion(RpcController controller, SplitTableRegionRequest request)
       throws ServiceException {


@@ -24,7 +24,6 @@ import java.lang.reflect.Constructor;
 import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -38,13 +37,10 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ByteBufferCell;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
-import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
@@ -64,7 +60,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagUtil;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.ClientUtil;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -81,7 +76,6 @@ import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLoadStats;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -188,6 +182,7 @@ import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.Methods;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.yetus.audience.InterfaceAudience;

 /**
  * Protobufs utility.
@@ -2964,14 +2959,12 @@ public final class ProtobufUtil {
           lsi.getServer()), new ServerLoad(lsi.getServerLoad()));
     }

-    Collection<ServerName> deadServers = null;
-    deadServers = new ArrayList<>(proto.getDeadServersList().size());
+    List<ServerName> deadServers = new ArrayList<>(proto.getDeadServersList().size());
     for (HBaseProtos.ServerName sn : proto.getDeadServersList()) {
       deadServers.add(ProtobufUtil.toServerName(sn));
     }

-    Collection<ServerName> backupMasters = null;
-    backupMasters = new ArrayList<>(proto.getBackupMastersList().size());
+    List<ServerName> backupMasters = new ArrayList<>(proto.getBackupMastersList().size());
     for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) {
       backupMasters.add(ProtobufUtil.toServerName(sn));
     }


@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.shaded.protobuf;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Optional;
@@ -1813,7 +1814,8 @@ public final class RequestConverter {
     return builder.build();
   }

-  public static ClearDeadServersRequest buildClearDeadServersRequest(List<ServerName> deadServers) {
+  public static ClearDeadServersRequest buildClearDeadServersRequest(
+      Collection<ServerName> deadServers) {
     ClearDeadServersRequest.Builder builder = ClearDeadServersRequest.newBuilder();
     for(ServerName server: deadServers) {
       builder.addServerName(ProtobufUtil.toServerName(server));


@@ -625,13 +625,6 @@ message RecommissionRegionServerRequest {
 message RecommissionRegionServerResponse {
 }

-message ListDeadServersRequest {
-}
-
-message ListDeadServersResponse {
-  repeated ServerName server_name = 1;
-}
-
 message ClearDeadServersRequest {
   repeated ServerName server_name = 1;
 }
@@ -993,7 +986,4 @@ service MasterService {
   rpc ClearDeadServers(ClearDeadServersRequest)
     returns(ClearDeadServersResponse);

-  /** Returns a list of Dead Servers. */
-  rpc ListDeadServers(ListDeadServersRequest)
-    returns(ListDeadServersResponse);
 }


@@ -54,7 +54,8 @@ MasterAddressTracker masterAddressTracker = master.getMasterAddressTracker();
     <th>Start Time</th>
 </tr>
 <%java>
-Collection<ServerName> backup_masters = master.getClusterStatus().getBackupMasters();
+Collection<ServerName> backup_masters = master.getClusterStatusWithoutCoprocessor(
+    EnumSet.of(ClusterStatus.Option.BACKUP_MASTERS)).getBackupMasters();
 ServerName [] backupServerNames = backup_masters.toArray(new ServerName[backup_masters.size()]);
 Arrays.sort(backupServerNames);
 for (ServerName serverName : backupServerNames) {


@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Set;

+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -1242,16 +1243,16 @@ public interface MasterObserver {
       throws IOException {}

   /**
-   * Called before list dead region servers.
+   * Called before get cluster status.
    */
-  default void preListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
+  default void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
       throws IOException {}

   /**
-   * Called after list dead region servers.
+   * Called after get cluster status.
    */
-  default void postListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
-      throws IOException {}
+  default void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      ClusterStatus status) throws IOException {}

   /**
    * Called before clear dead region servers.
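
For coprocessor authors, the replacement hook pair is wired like the sketch below, a condensed variant of the MyObserver test fixture added later in this commit (the class name here is illustrative):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class ClusterStatusAuditObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
    // Runs before the status is assembled; throwing here blocks the request,
    // which is how AccessController enforces ADMIN permission further down.
  }

  @Override
  public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
      ClusterStatus status) throws IOException {
    // Runs after assembly; the finished ClusterStatus is passed in.
  }
}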


@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.master;

+import com.google.common.base.Enums;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -834,7 +835,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    //initialize load balancer
    this.balancer.setMasterServices(this);
-   this.balancer.setClusterStatus(getClusterStatus());
+   this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());
    this.balancer.initialize();

    // Check if master is shutting down because of some issue
@@ -878,7 +879,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    this.assignmentManager.joinCluster();

    // set cluster status again after user regions are assigned
-   this.balancer.setClusterStatus(getClusterStatus());
+   this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());

    // Start balancer and meta catalog janitor after meta and regions have been assigned.
    status.setStatus("Starting balancer and catalog janitor");
@@ -1412,7 +1413,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    List<RegionPlan> plans = new ArrayList<>();

    //Give the balancer the current cluster state.
-   this.balancer.setClusterStatus(getClusterStatus());
+   this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());
    this.balancer.setClusterLoad(assignmentsByTable);

    for (Map<ServerName, List<RegionInfo>> serverMap : assignmentsByTable.values()) {
@@ -2411,17 +2412,12 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
   }

-  /**
-   * @return cluster status
-   */
-  public ClusterStatus getClusterStatus() throws InterruptedIOException {
-    return getClusterStatus(EnumSet.allOf(Option.class));
+  public ClusterStatus getClusterStatusWithoutCoprocessor() throws InterruptedIOException {
+    return getClusterStatusWithoutCoprocessor(EnumSet.allOf(Option.class));
   }

-  /**
-   * @return cluster status
-   */
-  public ClusterStatus getClusterStatus(EnumSet<Option> options) throws InterruptedIOException {
+  public ClusterStatus getClusterStatusWithoutCoprocessor(EnumSet<Option> options)
+      throws InterruptedIOException {
     ClusterStatus.Builder builder = ClusterStatus.newBuilder();
     // given that hbase1 can't submit the request with Option,
     // we return all information to client if the list of Option is empty.
@@ -2443,7 +2439,8 @@ public class HMaster extends HRegionServer implements MasterServices {
       }
       case DEAD_SERVERS: {
         if (serverManager != null) {
-          builder.setDeadServers(serverManager.getDeadServers().copyServerNames());
+          builder.setDeadServers(new ArrayList<>(
+            serverManager.getDeadServers().copyServerNames()));
         }
         break;
       }
@@ -2476,6 +2473,26 @@ public class HMaster extends HRegionServer implements MasterServices {
     return builder.build();
   }

+  /**
+   * @return cluster status
+   */
+  public ClusterStatus getClusterStatus() throws IOException {
+    return getClusterStatus(EnumSet.allOf(Option.class));
+  }
+
+  public ClusterStatus getClusterStatus(EnumSet<Option> options) throws IOException {
+    if (cpHost != null) {
+      cpHost.preGetClusterStatus();
+    }
+    ClusterStatus status = getClusterStatusWithoutCoprocessor(options);
+    LOG.info(getClientIdAuditPrefix() + " get ClusterStatus, status=" + status
+        + ", options=" + options);
+    if (cpHost != null) {
+      cpHost.postGetClusterStatus(status);
+    }
+    return status;
+  }
+
   private List<ServerName> getBackupMasters() throws InterruptedIOException {
     // Build Set of backup masters from ZK nodes
     List<String> backupMasterStrings;
@@ -3164,12 +3181,14 @@ public class HMaster extends HRegionServer implements MasterServices {
   @Override
   public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
-    return getClusterStatus().getLastMajorCompactionTsForTable(table);
+    return getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+        .getLastMajorCompactionTsForTable(table);
   }

   @Override
   public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
-    return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
+    return getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+        .getLastMajorCompactionTsForRegion(regionName);
   }

   /**
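
The shape that emerges from these hunks: getClusterStatus() is the externally visible entry point, now wrapped with preGetClusterStatus/postGetClusterStatus and an audit log line, while getClusterStatusWithoutCoprocessor() is the hook-free core. Internal callers (the balancer initialization above, ClusterStatusChore, and the backup-master status template) are switched to the hook-free variant so that the master's own housekeeping cannot be vetoed or slowed by observers such as the AccessController check added in this commit.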


@@ -26,6 +26,7 @@ import com.google.protobuf.Service;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
@@ -1565,20 +1566,20 @@ public class MasterCoprocessorHost
     });
   }

-  public void preListDeadServers() throws IOException {
+  public void preGetClusterStatus() throws IOException {
     execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
       @Override
       public void call(MasterObserver observer) throws IOException {
-        observer.preListDeadServers(this);
+        observer.preGetClusterStatus(this);
       }
     });
   }

-  public void postListDeadServers() throws IOException {
+  public void postGetClusterStatus(ClusterStatus status) throws IOException {
     execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
       @Override
       public void call(MasterObserver observer) throws IOException {
-        observer.postListDeadServers(this);
+        observer.postGetClusterStatus(this, status);
       }
     });
   }


@@ -176,8 +176,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDeadServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDeadServersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
@@ -2140,25 +2138,6 @@ public class MasterRpcServices extends RSRpcServices
     }
   }

-  @Override
-  public ListDeadServersResponse listDeadServers(RpcController controller,
-      ListDeadServersRequest request) throws ServiceException {
-    LOG.debug(master.getClientIdAuditPrefix() + " list dead region servers.");
-    ListDeadServersResponse.Builder response = ListDeadServersResponse.newBuilder();
-    try {
-      master.checkInitialized();
-      Set<ServerName> servers = master.getServerManager().getDeadServers().copyServerNames();
-      for (ServerName server : servers) {
-        response.addServerName(ProtobufUtil.toServerName(server));
-      }
-    } catch (IOException io) {
-      throw new ServiceException(io);
-    }
-    return response.build();
-  }
-
   @Override
   public ClearDeadServersResponse clearDeadServers(RpcController controller,
       ClearDeadServersRequest request) throws ServiceException {


@@ -46,7 +46,7 @@ public class ClusterStatusChore extends ScheduledChore {
   @Override
   protected void chore() {
     try {
-      balancer.setClusterStatus(master.getClusterStatus());
+      balancer.setClusterStatus(master.getClusterStatusWithoutCoprocessor());
     } catch (InterruptedIOException e) {
       LOG.warn("Ignoring interruption", e);
     }


@@ -2760,6 +2760,12 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
     checkLockPermissions(getActiveUser(ctx), null, tableName, null, description);
   }

+  @Override
+  public void preGetClusterStatus(final ObserverContext<MasterCoprocessorEnvironment> ctx)
+      throws IOException {
+    requirePermission(getActiveUser(ctx), "getClusterStatus", Action.ADMIN);
+  }
+
   private void checkLockPermissions(User user, String namespace,
       TableName tableName, RegionInfo[] regionInfos, String reason)
       throws IOException {


@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.hbase.client;

+import java.io.IOException;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Stream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.ClusterStatus.Option;
@@ -27,6 +31,11 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.security.User;
@@ -54,6 +63,7 @@ public class TestClientClusterStatus {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = HBaseConfiguration.create();
+    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName());
     UTIL = new HBaseTestingUtility(conf);
     UTIL.startMiniCluster(MASTERS, SLAVES);
     CLUSTER = UTIL.getHBaseCluster();
@@ -175,11 +185,11 @@ public class TestClientClusterStatus {
         EnumSet.of(Option.MASTER_COPROCESSORS, Option.HBASE_VERSION,
             Option.CLUSTER_ID, Option.BALANCER_ON);
     ClusterStatus status = ADMIN.getClusterStatus(options);
-    Assert.assertTrue(status.getMasterCoprocessors().length == 0);
+    Assert.assertTrue(status.getMasterCoprocessors().length == 1);
     Assert.assertNotNull(status.getHBaseVersion());
     Assert.assertNotNull(status.getClusterId());
     Assert.assertTrue(status.getAverageLoad() == 0.0);
-    Assert.assertNotNull(status.getBalancerOn() && !status.getBalancerOn());
+    Assert.assertNotNull(status.getBalancerOn());
   }

   @AfterClass
@@ -187,4 +197,33 @@ public class TestClientClusterStatus {
     if (ADMIN != null) ADMIN.close();
     UTIL.shutdownMiniCluster();
   }
+
+  @Test
+  public void testObserver() throws IOException {
+    int preCount = MyObserver.PRE_COUNT.get();
+    int postCount = MyObserver.POST_COUNT.get();
+    Assert.assertTrue(Stream.of(ADMIN.getClusterStatus().getMasterCoprocessors())
+        .anyMatch(s -> s.equals(MyObserver.class.getSimpleName())));
+    Assert.assertEquals(preCount + 1, MyObserver.PRE_COUNT.get());
+    Assert.assertEquals(postCount + 1, MyObserver.POST_COUNT.get());
+  }
+
+  public static class MyObserver implements MasterCoprocessor, MasterObserver {
+    private static final AtomicInteger PRE_COUNT = new AtomicInteger(0);
+    private static final AtomicInteger POST_COUNT = new AtomicInteger(0);
+
+    @Override public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
+
+    @Override public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
+        throws IOException {
+      PRE_COUNT.incrementAndGet();
+    }
+
+    @Override public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        ClusterStatus status) throws IOException {
+      POST_COUNT.incrementAndGet();
+    }
+  }
 }


@@ -27,7 +27,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -41,7 +40,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -907,7 +905,7 @@ public class TestSplitTransactionOnCluster {
     }
   }

-  private void waitUntilRegionServerDead() throws InterruptedException, InterruptedIOException {
+  private void waitUntilRegionServerDead() throws InterruptedException, IOException {
     // Wait until the master processes the RS shutdown
     for (int i=0; (cluster.getMaster().getClusterStatus().getServers().size() > NB_SERVERS
         || cluster.getLiveRegionServerThreads().size() > NB_SERVERS) && i<100; i++) {


@@ -30,14 +30,12 @@ import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
-
 import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -122,6 +120,8 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
@@ -137,9 +137,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;

-import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-
 /**
  * Performs authorization checks for common operations, according to different
  * levels of authorized users.
@@ -3133,4 +3130,18 @@ public class TestAccessController extends SecureTestUtil {
       TEST_UTIL.deleteTable(tname);
     }
   }
+
+  @Test
+  public void testGetClusterStatus() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preGetClusterStatus(ObserverContextImpl.createAndPrepare(CP_ENV));
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }


@@ -123,6 +123,7 @@ In case the table goes out of date, the unit tests which check for accuracy of p
 | | getReplicationPeerConfig | superuser\|global(A)
 | | updateReplicationPeerConfig | superuser\|global(A)
 | | listReplicationPeers | superuser\|global(A)
+| | getClusterStatus | superuser\|global(A)
 | Region | openRegion | superuser\|global(A)
 | | closeRegion | superuser\|global(A)
 | | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)