HBASE-23170 Admin#getRegionServers use ClusterMetrics.Option.SERVERS_NAME (#721)
commit d841245115
parent b0b7e5f5b8
@@ -1661,7 +1661,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a remote or network exception occurs
    */
   default Collection<ServerName> getRegionServers() throws IOException {
-    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
+    return getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName();
   }
 
   /**
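For reference, a minimal caller-side sketch of the updated synchronous API. This is not part of the commit; the class name and Configuration/Connection setup below are illustrative. Callers see the same result as before, but the default implementation now requests only ClusterMetrics.Option.SERVERS_NAME instead of the heavier LIVE_SERVERS metrics:

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListRegionServers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Returns the live region server names; no per-server metrics are fetched.
          Collection<ServerName> servers = admin.getRegionServers();
          servers.forEach(sn -> System.out.println(sn.getServerName()));
        }
      }
    }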
@@ -1026,8 +1026,8 @@ public interface AsyncAdmin {
    * @return current live region servers list wrapped by {@link CompletableFuture}
    */
   default CompletableFuture<Collection<ServerName>> getRegionServers() {
-    return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-      .thenApply(cm -> cm.getLiveServerMetrics().keySet());
+    return getClusterMetrics(EnumSet.of(Option.SERVERS_NAME))
+      .thenApply(ClusterMetrics::getServersName);
   }
 
   /**
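A corresponding asynchronous sketch, also illustrative rather than part of the commit; it assumes an AsyncConnection obtained elsewhere (for example via ConnectionFactory.createAsyncConnection(conf).get()), and the class and method names are placeholders:

    import java.util.Collection;
    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.AsyncConnection;

    public class ListRegionServersAsync {
      // Prints each live region server name once the future completes.
      static CompletableFuture<Void> printRegionServers(AsyncConnection asyncConn) {
        CompletableFuture<Collection<ServerName>> serversFuture =
            asyncConn.getAdmin().getRegionServers();
        return serversFuture.thenAccept(servers ->
            servers.forEach(sn -> System.out.println(sn.getServerName())));
      }
    }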
@@ -2840,14 +2840,13 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   public CompletableFuture<Void> updateConfiguration() {
     CompletableFuture<Void> future = new CompletableFuture<Void>();
     addListener(
-      getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS)),
+      getClusterMetrics(EnumSet.of(Option.SERVERS_NAME, Option.MASTER, Option.BACKUP_MASTERS)),
       (status, err) -> {
         if (err != null) {
           future.completeExceptionally(err);
         } else {
           List<CompletableFuture<Void>> futures = new ArrayList<>();
-          status.getLiveServerMetrics().keySet()
-            .forEach(server -> futures.add(updateConfiguration(server)));
+          status.getServersName().forEach(server -> futures.add(updateConfiguration(server)));
           futures.add(updateConfiguration(status.getMasterName()));
           status.getBackupMasterNames().forEach(master -> futures.add(updateConfiguration(master)));
           addListener(
@@ -3154,12 +3153,12 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
     CompletableFuture<List<ServerName>> future = new CompletableFuture<>();
     if (serverNamesList.isEmpty()) {
       CompletableFuture<ClusterMetrics> clusterMetricsCompletableFuture =
-        getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
+        getClusterMetrics(EnumSet.of(Option.SERVERS_NAME));
       addListener(clusterMetricsCompletableFuture, (clusterMetrics, err) -> {
         if (err != null) {
           future.completeExceptionally(err);
         } else {
-          future.complete(new ArrayList<>(clusterMetrics.getLiveServerMetrics().keySet()));
+          future.complete(clusterMetrics.getServersName());
         }
       });
       return future;
@@ -21,10 +21,8 @@ package org.apache.hadoop.hbase.chaos.actions;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.EnumSet;
 import java.util.List;
 import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -90,8 +88,7 @@ public class MoveRegionsOfTableAction extends Action {
   }
 
   static ServerName [] getServers(Admin admin) throws IOException {
-    Collection<ServerName> serversList =
-      admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
+    Collection<ServerName> serversList = admin.getRegionServers();
     return serversList.toArray(new ServerName[serversList.size()]);
   }
 
@@ -24,7 +24,6 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -36,7 +35,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.IntegrationTestBase;
@@ -755,10 +753,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
     // Scale this up on a real cluster
     if (util.isDistributedCluster()) {
       util.getConfiguration().setIfUnset(NUM_MAPS_KEY,
-        Integer.toString(util.getAdmin()
-          .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-          .getLiveServerMetrics().size() * 10)
-      );
+        Integer.toString(util.getAdmin().getRegionServers().size() * 10));
       util.getConfiguration().setIfUnset(NUM_IMPORT_ROUNDS_KEY, "5");
     } else {
       util.startMiniMapReduceCluster();
@@ -25,7 +25,6 @@ import java.io.InterruptedIOException;
 import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
@@ -42,7 +41,6 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -745,9 +743,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       // If we want to pre-split compute how many splits.
       if (conf.getBoolean(HBaseTestingUtility.PRESPLIT_TEST_TABLE_KEY,
           HBaseTestingUtility.PRESPLIT_TEST_TABLE)) {
-        int numberOfServers =
-          admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-            .getLiveServerMetrics().size();
+        int numberOfServers = admin.getRegionServers().size();
         if (numberOfServers == 0) {
           throw new IllegalStateException("No live regionservers");
         }
@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 import java.text.DecimalFormat;
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -34,7 +33,6 @@ import java.util.TreeMap;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
@@ -207,8 +205,7 @@ public class RegionPlacementMaintainer {
     // Get the all the region servers
     List<ServerName> servers = new ArrayList<>();
     try (Admin admin = this.connection.getAdmin()) {
-      servers.addAll(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-        .getLiveServerMetrics().keySet());
+      servers.addAll(admin.getRegionServers());
     }
 
     LOG.info("Start to generate assignment plan for " + numRegions +
@@ -1479,8 +1479,7 @@ public class CanaryTool implements Tool, Canary {
 
   private void checkWriteTableDistribution() throws IOException {
     if (!admin.tableExists(writeTableName)) {
-      int numberOfServers =
-        admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().size();
+      int numberOfServers = admin.getRegionServers().size();
       if (numberOfServers == 0) {
         throw new IllegalStateException("No live regionservers");
       }
@@ -1492,9 +1491,9 @@ public class CanaryTool implements Tool, Canary {
     }
 
     ClusterMetrics status =
-      admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER));
-    int numberOfServers = status.getLiveServerMetrics().size();
-    if (status.getLiveServerMetrics().containsKey(status.getMasterName())) {
+      admin.getClusterMetrics(EnumSet.of(Option.SERVERS_NAME, Option.MASTER));
+    int numberOfServers = status.getServersName().size();
+    if (status.getServersName().contains(status.getMasterName())) {
       numberOfServers -= 1;
     }
 
@@ -1795,8 +1794,7 @@ public class CanaryTool implements Tool, Canary {
     }
 
     // get any live regionservers not serving any regions
-    for (ServerName rs: this.admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-      .getLiveServerMetrics().keySet()) {
+    for (ServerName rs: this.admin.getRegionServers()) {
       String rsName = rs.getHostname();
       if (!rsAndRMap.containsKey(rsName)) {
         rsAndRMap.put(rsName, Collections.<RegionInfo> emptyList());
@@ -416,9 +416,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable {
       try {
         // Get Online RegionServers
         List<ServerName> regionServers = new ArrayList<>();
-        regionServers.addAll(
-          admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics()
-            .keySet());
+        regionServers.addAll(admin.getRegionServers());
         // Remove the host Region server from target Region Servers list
         ServerName server = stripServer(regionServers, hostname, port);
         if (server == null) {
@@ -550,9 +548,7 @@ public class RegionMover extends AbstractHBaseTool implements Closeable {
     while (EnvironmentEdgeManager.currentTime() < maxWait) {
       try {
         List<ServerName> regionServers = new ArrayList<>();
-        regionServers.addAll(
-          admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics()
-            .keySet());
+        regionServers.addAll(admin.getRegionServers());
         // Remove the host Region server from target Region Servers list
         server = stripServer(regionServers, hostname, port);
         if (server != null) {
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.math.BigInteger;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.EnumSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -35,8 +34,6 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ClusterMetrics;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -437,8 +434,7 @@ public class RegionSplitter {
    */
   private static int getRegionServerCount(final Connection connection) throws IOException {
     try (Admin admin = connection.getAdmin()) {
-      ClusterMetrics status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
-      Collection<ServerName> servers = status.getLiveServerMetrics().keySet();
+      Collection<ServerName> servers = admin.getRegionServers();
       return servers == null || servers.isEmpty()? 0: servers.size();
     }
   }
@@ -39,7 +39,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -59,7 +58,6 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Admin;
@@ -4092,9 +4090,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     // create a table a pre-splits regions.
     // The number of splits is set as:
     // region servers * regions per region server).
-    int numberOfServers =
-      admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics()
-        .size();
+    int numberOfServers = admin.getRegionServers().size();
     if (numberOfServers == 0) {
       throw new IllegalStateException("No live regionservers");
     }
@@ -104,6 +104,7 @@ public class TestClientClusterMetrics {
       defaults.getLiveServerMetrics().size());
     Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort());
     Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size());
+    Assert.assertEquals(ADMIN.getRegionServers().size(), defaults.getServersName().size());
   }
 
   @Test
@@ -265,6 +265,7 @@ public class TestAsyncClusterAdminApi extends TestAsyncAdminBase {
 
     // Check RegionLoad matches the regionLoad from ClusterStatus
     ClusterMetrics clusterStatus = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).get();
+    assertEquals(servers.size(), clusterStatus.getLiveServerMetrics().size());
     for (Map.Entry<ServerName, ServerMetrics> entry :
         clusterStatus.getLiveServerMetrics().entrySet()) {
       ServerName sn = entry.getKey();