HBASE-15511 ClusterStatus should be able to return responses by scope
Signed-off-by: Chia-Ping Tsai <chia7712@gmail.com>
This commit is contained in:
parent
173dce7347
commit
923195c39e
|
@ -47,6 +47,28 @@ import org.apache.hadoop.io.VersionedWritable;
|
|||
* <li>Regions in transition at master</li>
|
||||
* <li>The unique cluster ID</li>
|
||||
* </ul>
|
||||
* <tt>{@link Options}</tt> provides a way to filter out unwanted information.
|
||||
* The following code will retrieve all the cluster information.
|
||||
* <pre>
|
||||
* {@code
|
||||
* // Original version still works
|
||||
* Admin admin = connection.getAdmin();
|
||||
* ClusterStatus status = admin.getClusterStatus();
|
||||
* // or below, a new version which has the same effects
|
||||
* ClusterStatus status = admin.getClusterStatus(Options.defaultOptions());
|
||||
* }
|
||||
* </pre>
|
||||
* If information about dead servers and master coprocessors is unwanted,
|
||||
* then write the code in the following way:
|
||||
* <pre>
|
||||
* {@code
|
||||
* Admin admin = connection.getAdmin();
|
||||
* ClusterStatus status = admin.getClusterStatus(
|
||||
* Options.defaultOptions()
|
||||
* .excludeDeadServers()
|
||||
* .excludeMasterCoprocessors());
|
||||
* }
|
||||
* </pre>
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public class ClusterStatus extends VersionedWritable {
|
||||
|
@ -72,6 +94,12 @@ public class ClusterStatus extends VersionedWritable {
|
|||
private String[] masterCoprocessors;
|
||||
private Boolean balancerOn;
|
||||
|
||||
/**
|
||||
* Use {@link ClusterStatus.Builder} to construct a ClusterStatus instead.
|
||||
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
|
||||
* (<a href="https://issues.apache.org/jira/browse/HBASE-15511">HBASE-15511</a>).
|
||||
*/
|
||||
@Deprecated
|
||||
public ClusterStatus(final String hbaseVersion, final String clusterid,
|
||||
final Map<ServerName, ServerLoad> servers,
|
||||
final Collection<ServerName> deadServers,
|
||||
|
@ -80,8 +108,8 @@ public class ClusterStatus extends VersionedWritable {
|
|||
final List<RegionState> rit,
|
||||
final String[] masterCoprocessors,
|
||||
final Boolean balancerOn) {
|
||||
// TODO: make this constructor private
|
||||
this.hbaseVersion = hbaseVersion;
|
||||
|
||||
this.liveServers = servers;
|
||||
this.deadServers = deadServers;
|
||||
this.master = master;
|
||||
|
@ -133,7 +161,8 @@ public class ClusterStatus extends VersionedWritable {
|
|||
*/
|
||||
public double getAverageLoad() {
|
||||
int load = getRegionsCount();
|
||||
return (double)load / (double)getServersSize();
|
||||
int serverSize = getServersSize();
|
||||
return serverSize != 0 ? (double)load / (double)serverSize : 0.0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -333,4 +362,268 @@ public class ClusterStatus extends VersionedWritable {
|
|||
}
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() {
|
||||
return new Builder();
|
||||
}
|
||||
|
||||
/**
|
||||
* Builder for constructing a ClusterStatus.
|
||||
*/
|
||||
public static class Builder {
|
||||
private String hbaseVersion = null;
|
||||
private Map<ServerName, ServerLoad> liveServers = null;
|
||||
private Collection<ServerName> deadServers = null;
|
||||
private ServerName master = null;
|
||||
private Collection<ServerName> backupMasters = null;
|
||||
private List<RegionState> intransition = null;
|
||||
private String clusterId = null;
|
||||
private String[] masterCoprocessors = null;
|
||||
private Boolean balancerOn = null;
|
||||
|
||||
private Builder() {}
|
||||
|
||||
public Builder setHBaseVersion(String hbaseVersion) {
|
||||
this.hbaseVersion = hbaseVersion;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setLiveServers(Map<ServerName, ServerLoad> liveServers) {
|
||||
this.liveServers = liveServers;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setDeadServers(Collection<ServerName> deadServers) {
|
||||
this.deadServers = deadServers;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setMaster(ServerName master) {
|
||||
this.master = master;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setBackupMasters(Collection<ServerName> backupMasters) {
|
||||
this.backupMasters = backupMasters;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setRegionState(List<RegionState> intransition) {
|
||||
this.intransition = intransition;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setClusterId(String clusterId) {
|
||||
this.clusterId = clusterId;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setMasterCoprocessors(String[] masterCoprocessors) {
|
||||
this.masterCoprocessors = masterCoprocessors;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setBalancerOn(Boolean balancerOn) {
|
||||
this.balancerOn = balancerOn;
|
||||
return this;
|
||||
}
|
||||
|
||||
public ClusterStatus build() {
|
||||
return new ClusterStatus(hbaseVersion, clusterId, liveServers,
|
||||
deadServers, master, backupMasters, intransition, masterCoprocessors,
|
||||
balancerOn);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Options provides a way to filter out unwanted information.
|
||||
* For compatibility, default options includes all the information about a ClusterStatus.
|
||||
* To filter out unwanted information, use the specific excludeXXX() method.
|
||||
*/
|
||||
public static class Options {
|
||||
private boolean includeHBaseVersion = true;
|
||||
private boolean includeLiveServers = true;
|
||||
private boolean includeDeadServers = true;
|
||||
private boolean includeMaster = true;
|
||||
private boolean includeBackupMasters = true;
|
||||
private boolean includeRegionState = true;
|
||||
private boolean includeClusterId = true;
|
||||
private boolean includeMasterCoprocessors = true;
|
||||
private boolean includeBalancerOn = true;
|
||||
|
||||
private Options() {}
|
||||
|
||||
/**
|
||||
* Include all information about a ClusterStatus.
|
||||
*/
|
||||
public static Options getDefaultOptions() {
|
||||
return new Options();
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out hbase version.
|
||||
*/
|
||||
public Options excludeHBaseVersion() {
|
||||
includeHBaseVersion = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out live servers.
|
||||
*/
|
||||
public Options excludeLiveServers() {
|
||||
includeLiveServers = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out dead servers info.
|
||||
*/
|
||||
public Options excludeDeadServers() {
|
||||
includeDeadServers = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out master info.
|
||||
*/
|
||||
public Options excludeMaster() {
|
||||
includeMaster = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out backup masters info.
|
||||
*/
|
||||
public Options excludeBackupMasters() {
|
||||
includeBackupMasters = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out region state.
|
||||
*/
|
||||
public Options excludeRegionState() {
|
||||
includeRegionState = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out cluster id.
|
||||
*/
|
||||
public Options excludeClusterId() {
|
||||
includeClusterId = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out master's coprocessors info.
|
||||
*/
|
||||
public Options excludeMasterCoprocessors() {
|
||||
includeMasterCoprocessors = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter out balancer on info.
|
||||
*/
|
||||
public Options excludeBalancerOn() {
|
||||
includeBalancerOn = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include hbase version info.
|
||||
*/
|
||||
public boolean includeHBaseVersion() {
|
||||
return includeHBaseVersion;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include live servers info.
|
||||
*/
|
||||
public boolean includeLiveServers() {
|
||||
return includeLiveServers;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include dead servers info.
|
||||
*/
|
||||
public boolean includeDeadServers() {
|
||||
return includeDeadServers;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include master info.
|
||||
*/
|
||||
public boolean includeMaster() {
|
||||
return includeMaster;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include backup masters info.
|
||||
*/
|
||||
public boolean includeBackupMasters() {
|
||||
return includeBackupMasters;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include region states info.
|
||||
*/
|
||||
public boolean includeRegionState() {
|
||||
return includeRegionState;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include cluster id info.
|
||||
*/
|
||||
public boolean includeClusterId() {
|
||||
return includeClusterId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include master's coprocessors.
|
||||
*/
|
||||
public boolean includeMasterCoprocessors() {
|
||||
return includeMasterCoprocessors;
|
||||
}
|
||||
|
||||
/**
|
||||
* Include balancer on info.
|
||||
*/
|
||||
public boolean includeBalancerOn() {
|
||||
return includeBalancerOn;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset all options back to their defaults, so an Options instance can be reused.
|
||||
*/
|
||||
public Options reset() {
|
||||
includeHBaseVersion = true;
|
||||
includeLiveServers = true;
|
||||
includeDeadServers = true;
|
||||
includeMaster = true;
|
||||
includeBackupMasters = true;
|
||||
includeRegionState = true;
|
||||
includeClusterId = true;
|
||||
includeMasterCoprocessors = true;
|
||||
includeBalancerOn = true;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder builder = new StringBuilder("ClusterStatus info: [");
|
||||
builder.append("include hbase version: " + includeHBaseVersion + ", ");
|
||||
builder.append("include cluster id: " + includeClusterId + ", ");
|
||||
builder.append("include master info: " + includeMaster + ", ");
|
||||
builder.append("include backup masters info: " + includeBackupMasters + ", ");
|
||||
builder.append("include live servers info: " + includeLiveServers + ", ");
|
||||
builder.append("include dead servers info: " + includeDeadServers + ", ");
|
||||
builder.append("include masters coprocessors: " + includeMasterCoprocessors + ", ");
|
||||
builder.append("include region state: " + includeRegionState + ", ");
|
||||
builder.append("include balancer on: " + includeBalancerOn + "]");
|
||||
return builder.toString();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,6 +31,7 @@ import java.util.regex.Pattern;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.Abortable;
|
||||
import org.apache.hadoop.hbase.ClusterStatus;
|
||||
import org.apache.hadoop.hbase.ClusterStatus.Options;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
|
@ -1304,6 +1305,13 @@ public interface Admin extends Abortable, Closeable {
|
|||
*/
|
||||
ClusterStatus getClusterStatus() throws IOException;
|
||||
|
||||
/**
|
||||
* Get cluster status with options to filter out unwanted status.
|
||||
* @return cluster status
|
||||
* @throws IOException if a remote or network exception occurs
|
||||
*/
|
||||
ClusterStatus getClusterStatus(Options options) throws IOException;
|
||||
|
||||
/**
|
||||
* Get {@link RegionLoad} of all regions hosted on a regionserver.
|
||||
*
|
||||
|
|
|
@ -27,6 +27,7 @@ import java.util.function.Function;
|
|||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.hadoop.hbase.ClusterStatus;
|
||||
import org.apache.hadoop.hbase.ClusterStatus.Options;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.ProcedureInfo;
|
||||
import org.apache.hadoop.hbase.RegionLoad;
|
||||
|
@ -832,6 +833,11 @@ public interface AsyncAdmin {
|
|||
*/
|
||||
CompletableFuture<ClusterStatus> getClusterStatus();
|
||||
|
||||
/**
|
||||
* @return cluster status wrapped by {@link CompletableFuture}
|
||||
*/
|
||||
CompletableFuture<ClusterStatus> getClusterStatus(Options options);
|
||||
|
||||
/**
|
||||
* @return current master server name wrapped by {@link CompletableFuture}
|
||||
*/
|
||||
|
|
|
@ -36,6 +36,7 @@ import io.netty.util.TimerTask;
|
|||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.ClusterStatus;
|
||||
import org.apache.hadoop.hbase.ClusterStatus.Options;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.ProcedureInfo;
|
||||
import org.apache.hadoop.hbase.RegionLoad;
|
||||
|
@ -493,7 +494,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
|
|||
|
||||
@Override
|
||||
public CompletableFuture<ClusterStatus> getClusterStatus() {
|
||||
return wrap(rawAdmin.getClusterStatus());
|
||||
return getClusterStatus(Options.getDefaultOptions());
|
||||
}
|
||||
|
||||
@Override
|
||||
public CompletableFuture<ClusterStatus> getClusterStatus(Options options) {
|
||||
return wrap(rawAdmin.getClusterStatus(options));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -45,6 +45,7 @@ import org.apache.commons.logging.LogFactory;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.Abortable;
|
||||
import org.apache.hadoop.hbase.ClusterStatus;
|
||||
import org.apache.hadoop.hbase.ClusterStatus.Options;
|
||||
import org.apache.hadoop.hbase.CompoundConfiguration;
|
||||
import org.apache.hadoop.hbase.DoNotRetryIOException;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
|
@ -2074,13 +2075,18 @@ public class HBaseAdmin implements Admin {
|
|||
|
||||
@Override
|
||||
public ClusterStatus getClusterStatus() throws IOException {
|
||||
return getClusterStatus(Options.getDefaultOptions());
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterStatus getClusterStatus(Options options) throws IOException {
|
||||
return executeCallable(new MasterCallable<ClusterStatus>(getConnection(),
|
||||
this.rpcControllerFactory) {
|
||||
@Override
|
||||
protected ClusterStatus rpcCall() throws Exception {
|
||||
GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
|
||||
return ProtobufUtil.convert(master.getClusterStatus(getRpcController(), req).
|
||||
getClusterStatus());
|
||||
GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(options);
|
||||
return ProtobufUtil.convert(
|
||||
master.getClusterStatus(getRpcController(), req).getClusterStatus());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
|
|
@ -55,7 +55,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
|
|||
import org.apache.hadoop.hbase.HRegionLocation;
|
||||
import org.apache.hadoop.hbase.MetaTableAccessor;
|
||||
import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
|
||||
import org.apache.hadoop.hbase.NotServingRegionException;
|
||||
import org.apache.hadoop.hbase.ProcedureInfo;
|
||||
import org.apache.hadoop.hbase.RegionLoad;
|
||||
import org.apache.hadoop.hbase.RegionLocations;
|
||||
|
@ -69,6 +68,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
|
|||
import org.apache.hadoop.hbase.TableNotEnabledException;
|
||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||
import org.apache.hadoop.hbase.UnknownRegionException;
|
||||
import org.apache.hadoop.hbase.ClusterStatus.Options;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
|
||||
import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
|
||||
|
@ -2421,12 +2421,17 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
|
|||
|
||||
@Override
|
||||
public CompletableFuture<ClusterStatus> getClusterStatus() {
|
||||
return getClusterStatus(Options.getDefaultOptions());
|
||||
}
|
||||
|
||||
@Override
|
||||
public CompletableFuture<ClusterStatus> getClusterStatus(Options options) {
|
||||
return this
|
||||
.<ClusterStatus> newMasterCaller()
|
||||
.action(
|
||||
(controller, stub) -> this
|
||||
.<GetClusterStatusRequest, GetClusterStatusResponse, ClusterStatus> call(controller,
|
||||
stub, RequestConverter.buildGetClusterStatusRequest(),
|
||||
stub, RequestConverter.buildGetClusterStatusRequest(options),
|
||||
(s, c, req, done) -> s.getClusterStatus(c, req, done),
|
||||
resp -> ProtobufUtil.convert(resp.getClusterStatus()))).call();
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.shaded.protobuf;
|
||||
|
||||
import java.awt.image.BandCombineOp;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
@ -3069,6 +3070,7 @@ public final class ProtobufUtil {
|
|||
* @return the converted ClusterStatus
|
||||
*/
|
||||
public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) {
|
||||
ClusterStatus.Builder builder = ClusterStatus.newBuilder();
|
||||
|
||||
Map<ServerName, ServerLoad> servers = null;
|
||||
servers = new HashMap<>(proto.getLiveServersList().size());
|
||||
|
@ -3103,10 +3105,74 @@ public final class ProtobufUtil {
|
|||
masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName();
|
||||
}
|
||||
|
||||
return new ClusterStatus(proto.getHbaseVersion().getVersion(),
|
||||
ClusterId.convert(proto.getClusterId()).toString(),servers,deadServers,
|
||||
ProtobufUtil.toServerName(proto.getMaster()),backupMasters,rit,masterCoprocessors,
|
||||
proto.getBalancerOn());
|
||||
String clusterId = null;
|
||||
if (proto.hasClusterId()) {
|
||||
clusterId = ClusterId.convert(proto.getClusterId()).toString();
|
||||
}
|
||||
|
||||
String hbaseVersion = null;
|
||||
if (proto.hasHbaseVersion()) {
|
||||
hbaseVersion = proto.getHbaseVersion().getVersion();
|
||||
}
|
||||
|
||||
ServerName master = null;
|
||||
if (proto.hasMaster()) {
|
||||
master = ProtobufUtil.toServerName(proto.getMaster());
|
||||
}
|
||||
|
||||
Boolean balancerOn = null;
|
||||
if (proto.hasBalancerOn()) {
|
||||
balancerOn = proto.getBalancerOn();
|
||||
}
|
||||
builder.setHBaseVersion(hbaseVersion)
|
||||
.setClusterId(clusterId)
|
||||
.setLiveServers(servers)
|
||||
.setDeadServers(deadServers)
|
||||
.setMaster(master)
|
||||
.setBackupMasters(backupMasters)
|
||||
.setRegionState(rit)
|
||||
.setMasterCoprocessors(masterCoprocessors)
|
||||
.setBalancerOn(balancerOn);
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert ClusterStatusProtos.Options to ClusterStatus.Options
|
||||
* @param opt
|
||||
* @return ClusterStatus.Options
|
||||
*/
|
||||
public static ClusterStatus.Options toOptions (ClusterStatusProtos.Options opt) {
|
||||
ClusterStatus.Options option = ClusterStatus.Options.getDefaultOptions();
|
||||
if (!opt.getIncludeHbaseVersion()) option.excludeHBaseVersion();
|
||||
if (!opt.getIncludeLiveServers()) option.excludeLiveServers();
|
||||
if (!opt.getIncludeDeadServers()) option.excludeDeadServers();
|
||||
if (!opt.getIncludeRegionsState()) option.excludeRegionState();
|
||||
if (!opt.getIncludeClusterId()) option.excludeClusterId();
|
||||
if (!opt.getIncludeMasterCoprocessors()) option.excludeMasterCoprocessors();
|
||||
if (!opt.getIncludeMaster()) option.excludeMaster();
|
||||
if (!opt.getIncludeBackupMasters()) option.excludeBackupMasters();
|
||||
if (!opt.getIncludeBalancerOn()) option.excludeBalancerOn();
|
||||
return option;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert ClusterStatus.Options to proto ClusterStatusProtos.Options
|
||||
* @param opt
|
||||
* @return proto ClusterStatusProtos.Options
|
||||
*/
|
||||
public static ClusterStatusProtos.Options toOptions(ClusterStatus.Options opt) {
|
||||
ClusterStatusProtos.Options.Builder option =
|
||||
ClusterStatusProtos.Options.newBuilder();
|
||||
option.setIncludeHbaseVersion(opt.includeHBaseVersion())
|
||||
.setIncludeLiveServers(opt.includeLiveServers())
|
||||
.setIncludeDeadServers(opt.includeDeadServers())
|
||||
.setIncludeRegionsState(opt.includeRegionState())
|
||||
.setIncludeClusterId(opt.includeClusterId())
|
||||
.setIncludeMasterCoprocessors(opt.includeMasterCoprocessors())
|
||||
.setIncludeMaster(opt.includeMaster())
|
||||
.setIncludeBackupMasters(opt.includeBackupMasters())
|
||||
.setIncludeBalancerOn(opt.includeBalancerOn());
|
||||
return option.build();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3117,8 +3183,11 @@ public final class ProtobufUtil {
|
|||
public static ClusterStatusProtos.ClusterStatus convert(ClusterStatus status) {
|
||||
ClusterStatusProtos.ClusterStatus.Builder builder =
|
||||
ClusterStatusProtos.ClusterStatus.newBuilder();
|
||||
builder
|
||||
.setHbaseVersion(HBaseVersionFileContent.newBuilder().setVersion(status.getHBaseVersion()));
|
||||
if (status.getHBaseVersion() != null) {
|
||||
builder.setHbaseVersion(
|
||||
HBaseVersionFileContent.newBuilder()
|
||||
.setVersion(status.getHBaseVersion()));
|
||||
}
|
||||
|
||||
if (status.getServers() != null) {
|
||||
for (ServerName serverName : status.getServers()) {
|
||||
|
|
|
@ -26,10 +26,10 @@ import java.util.Set;
|
|||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.hadoop.hbase.CellScannable;
|
||||
import org.apache.hadoop.hbase.ClusterStatus.Options;
|
||||
import org.apache.hadoop.hbase.DoNotRetryIOException;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.NamespaceDescriptor;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
|
@ -1507,19 +1507,15 @@ public final class RequestConverter {
|
|||
return IsBalancerEnabledRequest.newBuilder().build();
|
||||
}
|
||||
|
||||
/**
|
||||
* @see {@link #buildGetClusterStatusRequest}
|
||||
*/
|
||||
private static final GetClusterStatusRequest GET_CLUSTER_STATUS_REQUEST =
|
||||
GetClusterStatusRequest.newBuilder().build();
|
||||
|
||||
/**
|
||||
* Creates a protocol buffer GetClusterStatusRequest
|
||||
*
|
||||
* @return A GetClusterStatusRequest
|
||||
*/
|
||||
public static GetClusterStatusRequest buildGetClusterStatusRequest() {
|
||||
return GET_CLUSTER_STATUS_REQUEST;
|
||||
public static GetClusterStatusRequest buildGetClusterStatusRequest(Options opt) {
|
||||
return GetClusterStatusRequest.newBuilder()
|
||||
.setClusterOptions(ProtobufUtil.toOptions(opt))
|
||||
.build();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -225,3 +225,15 @@ message ClusterStatus {
|
|||
repeated ServerName backup_masters = 8;
|
||||
optional bool balancer_on = 9;
|
||||
}
|
||||
|
||||
message Options {
|
||||
optional bool include_hbase_version = 1 [default = true];
|
||||
optional bool include_live_servers = 2 [default = true];
|
||||
optional bool include_dead_servers = 3 [default = true];
|
||||
optional bool include_regions_state = 4 [default = true];
|
||||
optional bool include_cluster_id = 5 [default = true];
|
||||
optional bool include_master_coprocessors = 6 [default = true];
|
||||
optional bool include_master = 7 [default = true];
|
||||
optional bool include_backup_masters = 8 [default = true];
|
||||
optional bool include_balancer_on = 9 [default = true];
|
||||
}
|
||||
|
|
|
@ -485,6 +485,7 @@ message GetTableStateResponse {
|
|||
}
|
||||
|
||||
message GetClusterStatusRequest {
|
||||
required Options cluster_options = 1;
|
||||
}
|
||||
|
||||
message GetClusterStatusResponse {
|
||||
|
|
|
@ -225,3 +225,15 @@ message ClusterStatus {
|
|||
repeated ServerName backup_masters = 8;
|
||||
optional bool balancer_on = 9;
|
||||
}
|
||||
|
||||
message Options {
|
||||
optional bool include_hbase_version = 1 [default = true];
|
||||
optional bool include_live_servers = 2 [default = true];
|
||||
optional bool include_dead_servers = 3 [default = true];
|
||||
optional bool include_regions_state = 4 [default = true];
|
||||
optional bool include_cluster_id = 5 [default = true];
|
||||
optional bool include_master_coprocessors = 6 [default = true];
|
||||
optional bool include_master = 7 [default = true];
|
||||
optional bool include_backup_masters = 8 [default = true];
|
||||
optional bool include_balancer_on = 9 [default = true];
|
||||
}
|
||||
|
|
|
@ -161,18 +161,12 @@ public class ClusterStatusPublisher extends ScheduledChore {
|
|||
// We're reusing an existing protobuf message, but we don't send everything.
|
||||
// This could be extended in the future, for example if we want to send stuff like the
|
||||
// hbase:meta server name.
|
||||
ClusterStatus cs = new ClusterStatus(VersionInfo.getVersion(),
|
||||
master.getMasterFileSystem().getClusterId().toString(),
|
||||
null,
|
||||
sns,
|
||||
master.getServerName(),
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
|
||||
|
||||
publisher.publish(cs);
|
||||
ClusterStatus.Builder csBuilder = ClusterStatus.newBuilder();
|
||||
csBuilder.setHBaseVersion(VersionInfo.getVersion())
|
||||
.setClusterId(master.getMasterFileSystem().getClusterId().toString())
|
||||
.setMaster(master.getServerName())
|
||||
.setDeadServers(sns);
|
||||
publisher.publish(csBuilder.build());
|
||||
}
|
||||
|
||||
protected void cleanup() {
|
||||
|
|
|
@ -54,6 +54,7 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.ClusterStatus;
|
||||
import org.apache.hadoop.hbase.ClusterStatus.Options;
|
||||
import org.apache.hadoop.hbase.CoordinatedStateException;
|
||||
import org.apache.hadoop.hbase.CoordinatedStateManager;
|
||||
import org.apache.hadoop.hbase.DoNotRetryIOException;
|
||||
|
@ -2431,6 +2432,48 @@ public class HMaster extends HRegionServer implements MasterServices {
|
|||
* @return cluster status
|
||||
*/
|
||||
public ClusterStatus getClusterStatus() throws InterruptedIOException {
|
||||
return getClusterStatus(Options.getDefaultOptions());
|
||||
}
|
||||
|
||||
/**
|
||||
* @return cluster status
|
||||
*/
|
||||
public ClusterStatus getClusterStatus(Options options) throws InterruptedIOException {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Retrieving cluster status info. " + options);
|
||||
}
|
||||
ClusterStatus.Builder builder = ClusterStatus.newBuilder();
|
||||
if (options.includeHBaseVersion()) {
|
||||
builder.setHBaseVersion(VersionInfo.getVersion());
|
||||
}
|
||||
if (options.includeClusterId()) {
|
||||
builder.setClusterId(getClusterId());
|
||||
}
|
||||
if (options.includeLiveServers() && serverManager != null) {
|
||||
builder.setLiveServers(serverManager.getOnlineServers());
|
||||
}
|
||||
if (options.includeDeadServers() && serverManager != null) {
|
||||
builder.setDeadServers(serverManager.getDeadServers().copyServerNames());
|
||||
}
|
||||
if (options.includeMaster()) {
|
||||
builder.setMaster(getServerName());
|
||||
}
|
||||
if (options.includeBackupMasters()) {
|
||||
builder.setBackupMasters(getBackupMasters());
|
||||
}
|
||||
if (options.includeRegionState() && assignmentManager != null) {
|
||||
builder.setRegionState(assignmentManager.getRegionStates().getRegionsStateInTransition());
|
||||
}
|
||||
if (options.includeMasterCoprocessors() && cpHost != null) {
|
||||
builder.setMasterCoprocessors(getMasterCoprocessors());
|
||||
}
|
||||
if (options.includeBalancerOn() && loadBalancerTracker != null) {
|
||||
builder.setBalancerOn(loadBalancerTracker.isBalancerOn());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
private List<ServerName> getBackupMasters() throws InterruptedIOException {
|
||||
// Build Set of backup masters from ZK nodes
|
||||
List<String> backupMasterStrings;
|
||||
try {
|
||||
|
@ -2474,24 +2517,7 @@ public class HMaster extends HRegionServer implements MasterServices {
|
|||
return s1.getServerName().compareTo(s2.getServerName());
|
||||
}});
|
||||
}
|
||||
|
||||
String clusterId = fileSystemManager != null ?
|
||||
fileSystemManager.getClusterId().toString() : null;
|
||||
List<RegionState> regionsInTransition = assignmentManager != null ?
|
||||
assignmentManager.getRegionStates().getRegionsStateInTransition() : null;
|
||||
|
||||
String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
|
||||
boolean balancerOn = loadBalancerTracker != null ?
|
||||
loadBalancerTracker.isBalancerOn() : false;
|
||||
Map<ServerName, ServerLoad> onlineServers = null;
|
||||
Set<ServerName> deadServers = null;
|
||||
if (serverManager != null) {
|
||||
deadServers = serverManager.getDeadServers().copyServerNames();
|
||||
onlineServers = serverManager.getOnlineServers();
|
||||
}
|
||||
return new ClusterStatus(VersionInfo.getVersion(), clusterId,
|
||||
onlineServers, deadServers, serverName, backupMasters,
|
||||
regionsInTransition, coprocessors, balancerOn);
|
||||
return backupMasters;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -762,7 +762,8 @@ public class MasterRpcServices extends RSRpcServices
|
|||
GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
|
||||
try {
|
||||
master.checkInitialized();
|
||||
response.setClusterStatus(ProtobufUtil.convert(master.getClusterStatus()));
|
||||
response.setClusterStatus(ProtobufUtil.convert(
|
||||
master.getClusterStatus(ProtobufUtil.toOptions(req.getClusterOptions()))));
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
|
|
|
@ -0,0 +1,221 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.ClusterStatus;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.ClusterStatus.Options;
|
||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||
import org.apache.hadoop.hbase.MiniHBaseCluster;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.master.HMaster;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegionServer;
|
||||
import org.apache.hadoop.hbase.security.User;
|
||||
import org.apache.hadoop.hbase.testclassification.SmallTests;
|
||||
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
|
||||
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Assert;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.experimental.categories.Category;
|
||||
|
||||
/**
|
||||
* Test the ClusterStatus.
|
||||
*/
|
||||
@Category(SmallTests.class)
|
||||
public class TestClientClusterStatus {
|
||||
private static HBaseTestingUtility UTIL;
|
||||
private static Admin ADMIN;
|
||||
private final static int SLAVES = 5;
|
||||
private final static int MASTERS = 3;
|
||||
private static MiniHBaseCluster CLUSTER;
|
||||
private static HRegionServer DEAD;
|
||||
|
||||
@BeforeClass
|
||||
public static void setUpBeforeClass() throws Exception {
|
||||
Configuration conf = HBaseConfiguration.create();
|
||||
UTIL = new HBaseTestingUtility(conf);
|
||||
UTIL.startMiniCluster(MASTERS, SLAVES);
|
||||
CLUSTER = UTIL.getHBaseCluster();
|
||||
CLUSTER.waitForActiveAndReadyMaster();
|
||||
ADMIN = UTIL.getAdmin();
|
||||
// Kill one region server
|
||||
List<RegionServerThread> rsts = CLUSTER.getLiveRegionServerThreads();
|
||||
RegionServerThread rst = rsts.get(rsts.size() - 1);
|
||||
DEAD = rst.getRegionServer();
|
||||
DEAD.stop("Test dead servers status");
|
||||
while (!DEAD.isStopped()) {
|
||||
Thread.sleep(500);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDefaults() throws Exception {
|
||||
ClusterStatus origin = ADMIN.getClusterStatus();
|
||||
ClusterStatus defaults = ADMIN.getClusterStatus(Options.getDefaultOptions());
|
||||
Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
|
||||
Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
|
||||
Assert.assertTrue(origin.getAverageLoad() == defaults.getAverageLoad());
|
||||
Assert.assertTrue(origin.getBackupMastersSize() == defaults.getBackupMastersSize());
|
||||
Assert.assertTrue(origin.getDeadServersSize() == defaults.getDeadServersSize());
|
||||
Assert.assertTrue(origin.getRegionsCount() == defaults.getRegionsCount());
|
||||
Assert.assertTrue(origin.getServersSize() == defaults.getServersSize());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExclude() throws Exception {
|
||||
ClusterStatus.Options options = Options.getDefaultOptions();
|
||||
// Only retrieve master's coprocessors which are null in this test env.
|
||||
options.excludeHBaseVersion()
|
||||
.excludeBackupMasters()
|
||||
.excludeBalancerOn()
|
||||
.excludeClusterId()
|
||||
.excludeLiveServers()
|
||||
.excludeDeadServers()
|
||||
.excludeMaster()
|
||||
.excludeRegionState();
|
||||
ClusterStatus status = ADMIN.getClusterStatus(options);
|
||||
// Other cluster status info should be either null or empty.
|
||||
Assert.assertTrue(status.getMasterCoprocessors().length == 0);
|
||||
Assert.assertNull(status.getHBaseVersion());
|
||||
Assert.assertTrue(status.getBackupMasters().isEmpty());
|
||||
Assert.assertNull(status.getBalancerOn());
|
||||
Assert.assertNull(status.getClusterId());
|
||||
Assert.assertTrue(status.getServers().isEmpty());
|
||||
Assert.assertTrue(status.getDeadServerNames().isEmpty());
|
||||
Assert.assertNull(status.getMaster());
|
||||
Assert.assertTrue(status.getBackupMasters().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAsyncClient() throws Exception {
|
||||
AsyncRegistry registry = AsyncRegistryFactory.getRegistry(UTIL.getConfiguration());
|
||||
AsyncConnectionImpl asyncConnect = new AsyncConnectionImpl(UTIL.getConfiguration(), registry,
|
||||
registry.getClusterId().get(), User.getCurrent());
|
||||
AsyncAdmin asyncAdmin = asyncConnect.getAdmin();
|
||||
CompletableFuture<ClusterStatus> originFuture =
|
||||
asyncAdmin.getClusterStatus();
|
||||
CompletableFuture<ClusterStatus> defaultsFuture =
|
||||
asyncAdmin.getClusterStatus(Options.getDefaultOptions());
|
||||
ClusterStatus origin = originFuture.get();
|
||||
ClusterStatus defaults = defaultsFuture.get();
|
||||
Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
|
||||
Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
|
||||
Assert.assertTrue(origin.getAverageLoad() == defaults.getAverageLoad());
|
||||
Assert.assertTrue(origin.getBackupMastersSize() == defaults.getBackupMastersSize());
|
||||
Assert.assertTrue(origin.getDeadServersSize() == defaults.getDeadServersSize());
|
||||
Assert.assertTrue(origin.getRegionsCount() == defaults.getRegionsCount());
|
||||
Assert.assertTrue(origin.getServersSize() == defaults.getServersSize());
|
||||
if (asyncConnect != null) {
|
||||
asyncConnect.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLiveAndDeadServersStatus() throws Exception {
|
||||
List<RegionServerThread> regionserverThreads = CLUSTER.getLiveRegionServerThreads();
|
||||
int numRs = 0;
|
||||
int len = regionserverThreads.size();
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (regionserverThreads.get(i).isAlive()) {
|
||||
numRs++;
|
||||
}
|
||||
}
|
||||
// Retrieve live servers and dead servers info.
|
||||
ClusterStatus.Options options = Options.getDefaultOptions();
|
||||
options.excludeHBaseVersion()
|
||||
.excludeBackupMasters()
|
||||
.excludeBalancerOn()
|
||||
.excludeClusterId()
|
||||
.excludeMaster()
|
||||
.excludeMasterCoprocessors()
|
||||
.excludeRegionState();
|
||||
ClusterStatus status = ADMIN.getClusterStatus(options);
|
||||
Assert.assertNotNull(status);
|
||||
Assert.assertNotNull(status.getServers());
|
||||
// exclude a dead region server
|
||||
Assert.assertEquals(SLAVES - 1, numRs);
|
||||
// live servers = primary master + nums of regionservers
|
||||
Assert.assertEquals(status.getServers().size() - 1, numRs);
|
||||
Assert.assertTrue(status.getRegionsCount() > 0);
|
||||
Assert.assertNotNull(status.getDeadServerNames());
|
||||
Assert.assertEquals(1, status.getDeadServersSize());
|
||||
ServerName deadServerName = status.getDeadServerNames().iterator().next();
|
||||
Assert.assertEquals(DEAD.getServerName(), deadServerName);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMasterAndBackupMastersStatus() throws Exception {
|
||||
// get all the master threads
|
||||
List<MasterThread> masterThreads = CLUSTER.getMasterThreads();
|
||||
int numActive = 0;
|
||||
int activeIndex = 0;
|
||||
ServerName activeName = null;
|
||||
HMaster active = null;
|
||||
for (int i = 0; i < masterThreads.size(); i++) {
|
||||
if (masterThreads.get(i).getMaster().isActiveMaster()) {
|
||||
numActive++;
|
||||
activeIndex = i;
|
||||
active = masterThreads.get(activeIndex).getMaster();
|
||||
activeName = active.getServerName();
|
||||
}
|
||||
}
|
||||
Assert.assertNotNull(active);
|
||||
Assert.assertEquals(1, numActive);
|
||||
Assert.assertEquals(MASTERS, masterThreads.size());
|
||||
// Retrieve master and backup masters infos only.
|
||||
ClusterStatus.Options options = Options.getDefaultOptions();
|
||||
options.excludeHBaseVersion()
|
||||
.excludeBalancerOn()
|
||||
.excludeClusterId()
|
||||
.excludeLiveServers()
|
||||
.excludeDeadServers()
|
||||
.excludeMasterCoprocessors()
|
||||
.excludeRegionState();
|
||||
ClusterStatus status = ADMIN.getClusterStatus(options);
|
||||
Assert.assertTrue(status.getMaster().equals(activeName));
|
||||
Assert.assertEquals(MASTERS - 1, status.getBackupMastersSize());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOtherStatusInfos() throws Exception {
|
||||
ClusterStatus.Options options = Options.getDefaultOptions();
|
||||
options.excludeMaster()
|
||||
.excludeBackupMasters()
|
||||
.excludeRegionState()
|
||||
.excludeLiveServers()
|
||||
.excludeBackupMasters();
|
||||
ClusterStatus status = ADMIN.getClusterStatus(options);
|
||||
Assert.assertTrue(status.getMasterCoprocessors().length == 0);
|
||||
Assert.assertNotNull(status.getHBaseVersion());
|
||||
Assert.assertNotNull(status.getClusterId());
|
||||
Assert.assertTrue(status.getAverageLoad() == 0.0);
|
||||
Assert.assertNotNull(status.getBalancerOn() && !status.getBalancerOn());
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDownAfterClass() throws Exception {
|
||||
if (ADMIN != null) ADMIN.close();
|
||||
UTIL.shutdownMiniCluster();
|
||||
}
|
||||
}