HBASE-21938 Add a new ClusterMetrics.Option SERVERS_NAME to only return the live region servers's name without metrics
Signed-off-by: Guanghao Zhang <zghao@apache.org>
commit 61621a33ce
parent 04caf89e80
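For context, here is a minimal client-side sketch of how the new option is intended to be used, assuming an HBase 2.x Admin API with this patch applied; the ListRegionServers class name and the use of a fresh HBaseConfiguration are illustrative assumptions, not part of this commit. Requesting only SERVERS_NAME returns the live region server names without the per-server ServerMetrics payload.

import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListRegionServers {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = connection.getAdmin()) {
      // Ask the master only for the live region server names; with this patch the
      // master answers SERVERS_NAME from ServerManager.getOnlineServersList() and
      // does not collect per-server load metrics.
      ClusterMetrics metrics = admin.getClusterMetrics(EnumSet.of(Option.SERVERS_NAME));
      List<ServerName> servers = metrics.getServersName();
      for (ServerName serverName : servers) {
        System.out.println(serverName.getServerName());
      }
    }
  }
}

Before this change, a caller needed Option.LIVE_SERVERS and read the keys of getLiveServerMetrics(), which ships the full per-server metrics over the wire just to learn the server names.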
@@ -139,6 +139,8 @@ public interface ClusterMetrics {
 
   int getMasterInfoPort();
 
+  List<ServerName> getServersName();
+
   /**
    * @return the average cluster load
    */
@@ -193,6 +195,10 @@ public interface ClusterMetrics {
     /**
      * metrics info port
      */
-    MASTER_INFO_PORT
+    MASTER_INFO_PORT,
+    /**
+     * metrics about live region servers name
+     */
+    SERVERS_NAME
   }
 }
@@ -33,6 +33,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.Option;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
@@ -67,7 +68,9 @@ public final class ClusterMetricsBuilder {
           .setRegionState(r.convert())
           .build())
         .collect(Collectors.toList()))
-      .setMasterInfoPort(metrics.getMasterInfoPort());
+      .setMasterInfoPort(metrics.getMasterInfoPort())
+      .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName)
+        .collect(Collectors.toList()));
     if (metrics.getMasterName() != null) {
       builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
     }
@@ -103,6 +106,8 @@ public final class ClusterMetricsBuilder {
           .collect(Collectors.toList()))
         .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream()
           .map(HBaseProtos.Coprocessor::getName)
+          .collect(Collectors.toList()))
+        .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName)
           .collect(Collectors.toList()));
     if (proto.hasClusterId()) {
       builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString());
@@ -142,6 +147,7 @@ public final class ClusterMetricsBuilder {
       case MASTER: return ClusterMetrics.Option.MASTER;
       case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS;
       case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON;
+      case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME;
       case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT;
       // should not reach here
       default: throw new IllegalArgumentException("Invalid option: " + option);
@@ -164,6 +170,7 @@ public final class ClusterMetricsBuilder {
       case MASTER: return ClusterStatusProtos.Option.MASTER;
       case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS;
       case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
+      case SERVERS_NAME: return Option.SERVERS_NAME;
       case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT;
       // should not reach here
       default: throw new IllegalArgumentException("Invalid option: " + option);
@@ -206,6 +213,7 @@ public final class ClusterMetricsBuilder {
   @Nullable
   private Boolean balancerOn;
   private int masterInfoPort;
+  private List<ServerName> serversName = Collections.emptyList();
 
   private ClusterMetricsBuilder() {
   }
@@ -251,6 +259,10 @@ public final class ClusterMetricsBuilder {
     this.masterInfoPort = value;
     return this;
   }
+  public ClusterMetricsBuilder setServerNames(List<ServerName> serversName) {
+    this.serversName = serversName;
+    return this;
+  }
   public ClusterMetrics build() {
     return new ClusterMetricsImpl(
         hbaseVersion,
@@ -262,7 +274,8 @@ public final class ClusterMetricsBuilder {
         clusterId,
         masterCoprocessorNames,
         balancerOn,
-        masterInfoPort);
+        masterInfoPort,
+        serversName);
   }
   private static class ClusterMetricsImpl implements ClusterMetrics {
     @Nullable
@@ -279,6 +292,7 @@ public final class ClusterMetricsBuilder {
     @Nullable
     private final Boolean balancerOn;
     private final int masterInfoPort;
+    private final List<ServerName> serversName;
 
     ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames,
         Map<ServerName, ServerMetrics> liveServerMetrics,
@@ -288,7 +302,8 @@ public final class ClusterMetricsBuilder {
         String clusterId,
         List<String> masterCoprocessorNames,
         Boolean balancerOn,
-        int masterInfoPort) {
+        int masterInfoPort,
+        List<ServerName> serversName) {
       this.hbaseVersion = hbaseVersion;
       this.deadServerNames = Preconditions.checkNotNull(deadServerNames);
       this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics);
@@ -299,6 +314,7 @@ public final class ClusterMetricsBuilder {
       this.masterCoprocessorNames = Preconditions.checkNotNull(masterCoprocessorNames);
       this.balancerOn = balancerOn;
       this.masterInfoPort = masterInfoPort;
+      this.serversName = serversName;
     }
 
     @Override
@@ -351,6 +367,11 @@ public final class ClusterMetricsBuilder {
       return masterInfoPort;
     }
 
+    @Override
+    public List<ServerName> getServersName() {
+      return Collections.unmodifiableList(serversName);
+    }
+
     @Override
     public String toString() {
       StringBuilder sb = new StringBuilder(1024);
@@ -365,11 +386,17 @@ public final class ClusterMetricsBuilder {
       }
 
       int serversSize = getLiveServerMetrics().size();
-      sb.append("\nNumber of live region servers: " + serversSize);
+      int serversNameSize = getServersName().size();
+      sb.append("\nNumber of live region servers: "
+        + (serversSize > 0 ? serversSize : serversNameSize));
       if (serversSize > 0) {
         for (ServerName serverName : getLiveServerMetrics().keySet()) {
           sb.append("\n " + serverName.getServerName());
         }
+      } else if (serversNameSize > 0) {
+        for (ServerName serverName : getServersName()) {
+          sb.append("\n " + serverName.getServerName());
+        }
       }
 
       int deadServerSize = getDeadServerNames().size();
@@ -344,6 +344,11 @@ public class ClusterStatus implements ClusterMetrics {
     return metrics.getMasterInfoPort();
   }
 
+  @Override
+  public List<ServerName> getServersName() {
+    return metrics.getServersName();
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(1024);
@@ -358,11 +363,17 @@ public class ClusterStatus implements ClusterMetrics {
     }
 
     int serversSize = getServersSize();
-    sb.append("\nNumber of live region servers: " + serversSize);
+    int serversNameSize = getServersName().size();
+    sb.append("\nNumber of live region servers: "
+      + (serversSize > 0 ? serversSize : serversNameSize));
     if (serversSize > 0) {
       for (ServerName serverName : metrics.getLiveServerMetrics().keySet()) {
         sb.append("\n " + serverName.getServerName());
       }
+    } else if (serversNameSize > 0) {
+      for (ServerName serverName : getServersName()) {
+        sb.append("\n " + serverName.getServerName());
+      }
     }
 
     int deadServerSize = metrics.getDeadServerNames().size();
@@ -229,6 +229,7 @@ message ClusterStatus {
   repeated ServerName backup_masters = 8;
   optional bool balancer_on = 9;
   optional int32 master_info_port = 10 [default = -1];
+  repeated ServerName servers_name = 11;
 }
 
 enum Option {
@@ -242,4 +243,5 @@ enum Option {
   REGIONS_IN_TRANSITION = 7;
   BALANCER_ON = 8;
   MASTER_INFO_PORT = 9;
+  SERVERS_NAME = 10;
 }
@@ -2780,6 +2780,12 @@ public class HMaster extends HRegionServer implements MasterServices {
           }
           break;
         }
+        case SERVERS_NAME: {
+          if (serverManager != null) {
+            builder.setServerNames(serverManager.getOnlineServersList());
+          }
+          break;
+        }
       }
     }
     return builder.build();
@@ -96,6 +96,7 @@ public class TestClientClusterMetrics {
     Assert.assertEquals(origin.getLiveServerMetrics().size(),
       defaults.getLiveServerMetrics().size());
     Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
+    Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
   }
 
   @Test
@@ -121,6 +122,7 @@ public class TestClientClusterMetrics {
       Assert.assertEquals(origin.getLiveServerMetrics().size(),
         defaults.getLiveServerMetrics().size());
       Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
+      Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
     }
   }
 
@@ -146,7 +148,8 @@ public class TestClientClusterMetrics {
       }
     });
     // Retrieve live servers and dead servers info.
-    EnumSet<Option> options = EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS);
+    EnumSet<Option> options =
+      EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS, Option.SERVERS_NAME);
     ClusterMetrics metrics = ADMIN.getClusterMetrics(options);
     Assert.assertNotNull(metrics);
     // exclude a dead region server
@@ -160,6 +163,8 @@ public class TestClientClusterMetrics {
     Assert.assertEquals(1, metrics.getDeadServerNames().size());
     ServerName deadServerName = metrics.getDeadServerNames().iterator().next();
     Assert.assertEquals(DEAD.getServerName(), deadServerName);
+    Assert.assertNotNull(metrics.getServersName());
+    Assert.assertEquals(numRs, metrics.getServersName().size());
   }
 
   @Test
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -99,6 +98,7 @@ public class TestClientClusterStatus {
     Assert.assertTrue(origin.getServersSize() == defaults.getServersSize());
     Assert.assertTrue(origin.getMasterInfoPort() == defaults.getMasterInfoPort());
     Assert.assertTrue(origin.equals(defaults));
+    Assert.assertTrue(origin.getServersName().size() == defaults.getServersName().size());
   }
 
   @Test
@@ -136,7 +136,8 @@ public class TestClientClusterStatus {
      }
    });
    // Retrieve live servers and dead servers info.
-    EnumSet<Option> options = EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS);
+    EnumSet<Option> options =
+      EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS, Option.SERVERS_NAME);
     ClusterStatus status = new ClusterStatus(ADMIN.getClusterMetrics(options));
     checkPbObjectNotNull(status);
     Assert.assertNotNull(status);
@@ -152,6 +153,8 @@ public class TestClientClusterStatus {
     Assert.assertEquals(1, status.getDeadServersSize());
     ServerName deadServerName = status.getDeadServerNames().iterator().next();
     Assert.assertEquals(DEAD.getServerName(), deadServerName);
+    Assert.assertNotNull(status.getServersName());
+    Assert.assertEquals(numRs, status.getServersName().size());
   }
 
   @Test