HBASE-18609 Apply ClusterStatus#getClusterStatus(EnumSet<Option>) in code base

Signed-off-by: Chia-Ping Tsai <chia7712@gmail.com>
Reid Chan 2017-09-13 15:08:24 +08:00 committed by Chia-Ping Tsai
parent 5370aed410
commit dc1db8c5b3
30 changed files with 160 additions and 66 deletions
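The pattern applied throughout this change: instead of fetching a fully populated ClusterStatus, call sites now pass an EnumSet of ClusterStatus.Option values so the master only computes and ships the fields the caller actually reads. A minimal sketch of the before/after shape, assuming an already-connected Admin handle named admin (hypothetical variable name):

    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.ClusterStatus.Option;

    // Before: materializes every field of ClusterStatus (live/dead servers,
    // master, backup masters, regions in transition, ...).
    ClusterStatus full = admin.getClusterStatus();

    // After: scope the request to the one field this caller reads.
    ClusterStatus scoped = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
    int liveServers = scoped.getServersSize();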

View File

@@ -845,21 +845,21 @@ public interface AsyncAdmin {
    * @return current master server name wrapped by {@link CompletableFuture}
    */
   default CompletableFuture<ServerName> getMaster() {
-    return getClusterStatus().thenApply(ClusterStatus::getMaster);
+    return getClusterStatus(EnumSet.of(Option.MASTER)).thenApply(ClusterStatus::getMaster);
   }
   /**
    * @return current backup master list wrapped by {@link CompletableFuture}
    */
   default CompletableFuture<Collection<ServerName>> getBackupMasters() {
-    return getClusterStatus().thenApply(ClusterStatus::getBackupMasters);
+    return getClusterStatus(EnumSet.of(Option.BACKUP_MASTERS)).thenApply(ClusterStatus::getBackupMasters);
   }
   /**
    * @return current live region servers list wrapped by {@link CompletableFuture}
    */
   default CompletableFuture<Collection<ServerName>> getRegionServers() {
-    return getClusterStatus().thenApply(ClusterStatus::getServers);
+    return getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).thenApply(ClusterStatus::getServers);
   }
   /**
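A usage sketch of the async variants above, assuming an AsyncAdmin instance named asyncAdmin (hypothetical): the scoped getClusterStatus(EnumSet) returns a CompletableFuture<ClusterStatus>, which the default methods project down to the single requested field.

    // Convenience default method shown in the hunk above.
    asyncAdmin.getMaster()
        .thenAccept(master -> System.out.println("Active master: " + master));

    // Equivalent to its body: one scoped status RPC, then a projection.
    asyncAdmin.getClusterStatus(EnumSet.of(Option.MASTER))
        .thenApply(ClusterStatus::getMaster)
        .thenAccept(master -> System.out.println("Active master: " + master));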

View File

@@ -2450,7 +2450,7 @@ public class HBaseAdmin implements Admin {
   @Override
   public String[] getMasterCoprocessors() {
     try {
-      return getClusterStatus().getMasterCoprocessors();
+      return getClusterStatus(EnumSet.of(Option.MASTER_COPROCESSORS)).getMasterCoprocessors();
     } catch (IOException e) {
       LOG.error("Could not getClusterStatus()",e);
       return null;
@@ -3155,13 +3155,15 @@ public class HBaseAdmin implements Admin {
   @Override
   public void updateConfiguration() throws IOException {
-    for (ServerName server : this.getClusterStatus().getServers()) {
+    ClusterStatus status = getClusterStatus(
+        EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS));
+    for (ServerName server : status.getServers()) {
       updateConfiguration(server);
     }
-    updateConfiguration(this.getClusterStatus().getMaster());
-    for (ServerName server : this.getClusterStatus().getBackupMasters()) {
+    updateConfiguration(status.getMaster());
+    for (ServerName server : status.getBackupMasters()) {
       updateConfiguration(server);
     }
   }
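Beyond scoping the options, the hunk above also caches the result, so a single RPC replaces the three separate getClusterStatus() calls the old method body issued. A sketch of the difference, assuming an Admin named admin (hypothetical):

    // Old shape: three full-status RPCs.
    admin.getClusterStatus().getServers();
    admin.getClusterStatus().getMaster();
    admin.getClusterStatus().getBackupMasters();

    // New shape: one scoped RPC whose result is reused.
    ClusterStatus status = admin.getClusterStatus(
        EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS));
    status.getServers();
    status.getMaster();
    status.getBackupMasters();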

View File

@@ -2498,7 +2498,9 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
   @Override
   public CompletableFuture<Void> updateConfiguration() {
     CompletableFuture<Void> future = new CompletableFuture<Void>();
-    getClusterStatus().whenComplete(
+    getClusterStatus(
+      EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS))
+      .whenComplete(
         (status, err) -> {
           if (err != null) {
             future.completeExceptionally(err);

View File

@@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.chaos.actions;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.List;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -55,7 +57,8 @@ public class MoveRegionsOfTableAction extends Action {
     }
     Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin();
-    Collection<ServerName> serversList = admin.getClusterStatus().getServers();
+    Collection<ServerName> serversList =
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers();
     ServerName[] servers = serversList.toArray(new ServerName[serversList.size()]);
     LOG.info("Performing action: Move regions of table " + tableName);

View File

@@ -24,6 +24,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
@@ -38,6 +39,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -747,7 +749,9 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
     // Scale this up on a real cluster
     if (util.isDistributedCluster()) {
       util.getConfiguration().setIfUnset(NUM_MAPS_KEY,
-          Integer.toString(util.getAdmin().getClusterStatus().getServersSize() * 10)
+          Integer.toString(util.getAdmin()
+              .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+              .getServersSize() * 10)
       );
       util.getConfiguration().setIfUnset(NUM_IMPORT_ROUNDS_KEY, "5");
     } else {

View File

@@ -26,6 +26,7 @@ import java.io.InterruptedIOException;
 import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
@@ -50,6 +51,7 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -708,7 +710,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       // If we want to pre-split compute how many splits.
       if (conf.getBoolean(HBaseTestingUtility.PRESPLIT_TEST_TABLE_KEY,
           HBaseTestingUtility.PRESPLIT_TEST_TABLE)) {
-        int numberOfServers = admin.getClusterStatus().getServers().size();
+        int numberOfServers =
+            admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+                .getServers().size();
         if (numberOfServers == 0) {
           throw new IllegalStateException("No live regionservers");
         }

View File

@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.rest;
 import java.io.IOException;
+import java.util.EnumSet;
 import javax.ws.rs.GET;
 import javax.ws.rs.Produces;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
 @InterfaceAudience.Private
@@ -68,7 +70,8 @@ public class StorageClusterStatusResource extends ResourceBase {
     }
     servlet.getMetrics().incrementRequests(1);
     try {
-      ClusterStatus status = servlet.getAdmin().getClusterStatus();
+      ClusterStatus status = servlet.getAdmin().getClusterStatus(
+        EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
       StorageClusterStatusModel model = new StorageClusterStatusModel();
       model.setRegions(status.getRegionsCount());
       model.setRequests(status.getRequestsCount());

View File

@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.rest;
 import java.io.IOException;
+import java.util.EnumSet;
 import javax.ws.rs.GET;
 import javax.ws.rs.Produces;
@@ -33,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
 @InterfaceAudience.Private
@@ -64,7 +66,9 @@ public class StorageClusterVersionResource extends ResourceBase {
     servlet.getMetrics().incrementRequests(1);
     try {
       StorageClusterVersionModel model = new StorageClusterVersionModel();
-      model.setVersion(servlet.getAdmin().getClusterStatus().getHBaseVersion());
+      model.setVersion(
+        servlet.getAdmin().getClusterStatus(EnumSet.of(Option.HBASE_VERSION))
+          .getHBaseVersion());
       ResponseBuilder response = Response.ok(model);
       response.cacheControl(cacheControl);
       servlet.getMetrics().incrementSucessfulGetRequests(1);

View File

@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.security.SecureRandom;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
@@ -263,7 +265,8 @@ public abstract class TestRSGroupsBase {
   // return the real number of region servers, excluding the master embedded region server in 2.0+
   public int getNumServers() throws IOException {
-    ClusterStatus status = admin.getClusterStatus();
+    ClusterStatus status =
+        admin.getClusterStatus(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS));
     ServerName master = status.getMaster();
     int count = 0;
     for (ServerName sn : status.getServers()) {
@@ -489,8 +492,9 @@ public abstract class TestRSGroupsBase {
     }
     //get server which is not a member of new group
     ServerName targetServer = null;
-    for(ServerName server : admin.getClusterStatus().getServers()) {
-      if(!newGroup.containsServer(server.getAddress())) {
+    for (ServerName server : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+        .getServers()) {
+      if (!newGroup.containsServer(server.getAddress())) {
         targetServer = server;
         break;
       }
@@ -518,7 +522,8 @@ public abstract class TestRSGroupsBase {
         return
             getTableRegionMap().get(tableName) != null &&
                 getTableRegionMap().get(tableName).size() == 6 &&
-                admin.getClusterStatus().getRegionsInTransition().size() < 1;
+                admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
+                    .getRegionsInTransition().size() < 1;
       }
     });
@@ -722,7 +727,7 @@ public abstract class TestRSGroupsBase {
     //get server which is not a member of new group
     ServerName targetServer = null;
-    for(ServerName server : admin.getClusterStatus().getServers()) {
+    for(ServerName server : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
       if(!newGroup.containsServer(server.getAddress()) &&
           !rsGroupAdmin.getRSGroupInfo("master").containsServer(server.getAddress())) {
         targetServer = server;
@@ -785,7 +790,8 @@ public abstract class TestRSGroupsBase {
         return getTableRegionMap().get(tableName) != null &&
             getTableRegionMap().get(tableName).size() == 5 &&
             getTableServerRegionMap().get(tableName).size() == 1 &&
-            admin.getClusterStatus().getRegionsInTransition().size() < 1;
+            admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
+                .getRegionsInTransition().size() < 1;
       }
     });

View File

@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 import java.text.DecimalFormat;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
@@ -206,7 +208,7 @@ public class RegionPlacementMaintainer {
     // Get the all the region servers
     List<ServerName> servers = new ArrayList<>();
     try (Admin admin = this.connection.getAdmin()) {
-      servers.addAll(admin.getClusterStatus().getServers());
+      servers.addAll(admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers());
     }
     LOG.info("Start to generate assignment plan for " + numRegions +

View File

@@ -30,6 +30,7 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -56,6 +57,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -1177,7 +1179,9 @@ public final class Canary implements Tool {
   private void checkWriteTableDistribution() throws IOException {
     if (!admin.tableExists(writeTableName)) {
-      int numberOfServers = admin.getClusterStatus().getServers().size();
+      int numberOfServers =
+          admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()
+              .size();
       if (numberOfServers == 0) {
         throw new IllegalStateException("No live regionservers");
       }
@@ -1188,7 +1192,8 @@ public final class Canary implements Tool {
       admin.enableTable(writeTableName);
     }
-    ClusterStatus status = admin.getClusterStatus();
+    ClusterStatus status =
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER));
     int numberOfServers = status.getServersSize();
     if (status.getServers().contains(status.getMaster())) {
       numberOfServers -= 1;
@@ -1491,11 +1496,12 @@ public final class Canary implements Tool {
           table.close();
         }
-        //get any live regionservers not serving any regions
-        for (ServerName rs : this.admin.getClusterStatus().getServers()) {
+        // get any live regionservers not serving any regions
+        for (ServerName rs : this.admin
+            .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
           String rsName = rs.getHostname();
           if (!rsAndRMap.containsKey(rsName)) {
-            rsAndRMap.put(rsName, Collections.<HRegionInfo>emptyList());
+            rsAndRMap.put(rsName, Collections.<HRegionInfo> emptyList());
           }
         }
       } catch (IOException e) {

View File

@@ -42,6 +42,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -85,6 +86,7 @@ import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
@@ -519,7 +521,9 @@ public class HBaseFsck extends Configured implements Closeable {
     connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());
     admin = connection.getAdmin();
     meta = connection.getTable(TableName.META_TABLE_NAME);
-    status = admin.getClusterStatus();
+    status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS,
+      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,
+      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));
   }
   /**
@@ -2440,7 +2444,7 @@ public class HBaseFsck extends Configured implements Closeable {
       LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI());
       int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication();
       HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
-          admin.getClusterStatus().getServers(), numReplicas);
+          admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers(), numReplicas);
       tryAssignmentRepair(hbi, "Trying to reassign region...");
     }
@@ -2467,7 +2471,7 @@ public class HBaseFsck extends Configured implements Closeable {
       LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI());
       int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication();
       HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
-          admin.getClusterStatus().getServers(), numReplicas);
+          admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers(), numReplicas);
       tryAssignmentRepair(hbi, "Trying to fix unassigned region...");
     }

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.util;
 import java.io.IOException;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
@@ -123,7 +125,8 @@ public class HBaseFsckRepair {
     while (EnvironmentEdgeManager.currentTime() < expiration) {
       try {
         boolean inTransition = false;
-        for (RegionState rs: admin.getClusterStatus().getRegionsInTransition()) {
+        for (RegionState rs : admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
+            .getRegionsInTransition()) {
           if (rs.getRegion().equals(region)) {
             inTransition = true;
             break;

View File

@@ -29,6 +29,7 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
@@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -745,8 +747,9 @@ public class RegionMover extends AbstractHBaseTool {
    * @throws IOException
    */
   private void stripMaster(ArrayList<String> regionServers, Admin admin) throws IOException {
-    String masterHostname = admin.getClusterStatus().getMaster().getHostname();
-    int masterPort = admin.getClusterStatus().getMaster().getPort();
+    ServerName master = admin.getClusterStatus(EnumSet.of(Option.MASTER)).getMaster();
+    String masterHostname = master.getHostname();
+    int masterPort = master.getPort();
     try {
       stripServer(regionServers, masterHostname, masterPort);
     } catch (Exception e) {
@@ -821,7 +824,8 @@ public class RegionMover extends AbstractHBaseTool {
    * @throws IOException
    */
   private ArrayList<String> getServers(Admin admin) throws IOException {
-    ArrayList<ServerName> serverInfo = new ArrayList<>(admin.getClusterStatus().getServers());
+    ArrayList<ServerName> serverInfo = new ArrayList<>(
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers());
     ArrayList<String> regionServers = new ArrayList<>(serverInfo.size());
     for (ServerName server : serverInfo) {
       regionServers.add(server.getServerName());

View File

@@ -23,6 +23,7 @@ import java.math.BigInteger;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -46,6 +47,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -417,7 +419,7 @@ public class RegionSplitter {
    */
   private static int getRegionServerCount(final Connection connection) throws IOException {
     try (Admin admin = connection.getAdmin()) {
-      ClusterStatus status = admin.getClusterStatus();
+      ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
       Collection<ServerName> servers = status.getServers();
       return servers == null || servers.isEmpty()? 0: servers.size();
     }

View File

@@ -37,6 +37,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -59,6 +60,7 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -4037,7 +4039,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     // create a table a pre-splits regions.
     // The number of splits is set as:
     //    region servers * regions per region server).
-    int numberOfServers = admin.getClusterStatus().getServers().size();
+    int numberOfServers =
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()
+            .size();
     if (numberOfServers == 0) {
       throw new IllegalStateException("No live regionservers");
     }

View File

@@ -23,6 +23,7 @@ import static org.junit.Assert.*;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -35,6 +36,7 @@ import org.junit.experimental.categories.Category;
 import java.io.IOException;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -78,7 +80,8 @@ public class TestRegionLoad {
   public void testRegionLoad() throws Exception {
     // Check if regions match with the regionLoad from the server
-    for (ServerName serverName : admin.getClusterStatus().getServers()) {
+    for (ServerName serverName : admin
+        .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
       List<HRegionInfo> regions = admin.getOnlineRegions(serverName);
       Collection<RegionLoad> regionLoads = admin.getRegionLoad(serverName).values();
       checkRegionsAndRegionLoads(regions, regionLoads);
@@ -89,14 +92,15 @@ public class TestRegionLoad {
       List<HRegionInfo> tableRegions = admin.getTableRegions(table);
       List<RegionLoad> regionLoads = Lists.newArrayList();
-      for (ServerName serverName : admin.getClusterStatus().getServers()) {
+      for (ServerName serverName : admin
+          .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
         regionLoads.addAll(admin.getRegionLoad(serverName, table).values());
       }
       checkRegionsAndRegionLoads(tableRegions, regionLoads);
     }
     // Check RegionLoad matches the regionLoad from ClusterStatus
-    ClusterStatus clusterStatus = admin.getClusterStatus();
+    ClusterStatus clusterStatus = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
     for (ServerName serverName : clusterStatus.getServers()) {
       ServerLoad serverLoad = clusterStatus.getLoad(serverName);
       Map<byte[], RegionLoad> regionLoads = admin.getRegionLoad(serverName);

View File

@@ -26,6 +26,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.ExecutorService;
@@ -37,6 +38,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -701,7 +703,8 @@ public class TestAdmin2 {
     assertTrue(drainingServers.isEmpty());
     // Drain all region servers.
-    Collection<ServerName> clusterServers = admin.getClusterStatus().getServers();
+    Collection<ServerName> clusterServers =
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers();
     drainingServers = new ArrayList<>();
     for (ServerName server : clusterServers) {
       drainingServers.add(server);

View File

@@ -27,6 +27,7 @@ import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -240,7 +242,7 @@ public class TestAsyncClusterAdminApi extends TestAsyncAdminBase {
     }
     // Check RegionLoad matches the regionLoad from ClusterStatus
-    ClusterStatus clusterStatus = admin.getClusterStatus().get();
+    ClusterStatus clusterStatus = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).get();
     for (ServerName serverName : clusterStatus.getServers()) {
       ServerLoad serverLoad = clusterStatus.getLoad(serverName);
       compareRegionLoads(serverLoad.getRegionsLoad().values(), admin.getRegionLoads(serverName)

View File

@@ -31,6 +31,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -68,6 +69,7 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
@@ -4423,8 +4425,8 @@ public class TestFromClientSide {
     boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
     try (Admin admin = conn.getAdmin()) {
       assertTrue(admin.tableExists(tableName));
-      assertTrue(admin.getClusterStatus().getServersSize() ==
-          SLAVES + (tablesOnMaster? 1: 0));
+      assertTrue(admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+          .getServersSize() == SLAVES + (tablesOnMaster ? 1 : 0));
     }
   }

View File

@@ -26,6 +26,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
@@ -34,6 +35,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -406,7 +408,8 @@ public class TestMetaWithReplicas {
     // check that the data in the znode is parseable (this would also mean the znode exists)
     byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
     ServerName currentServer = ProtobufUtil.toServerName(data);
-    Collection<ServerName> liveServers = TEST_UTIL.getAdmin().getClusterStatus().getServers();
+    Collection<ServerName> liveServers = TEST_UTIL.getAdmin()
+        .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers();
     ServerName moveToServer = null;
     for (ServerName s : liveServers) {
       if (!currentServer.equals(s)) {

View File

@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -33,6 +34,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -80,7 +82,8 @@ public class TestMasterOperationsForRegionReplicas {
     TEST_UTIL.startMiniCluster(numSlaves);
     CONNECTION = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
     ADMIN = CONNECTION.getAdmin();
-    while(ADMIN.getClusterStatus().getServers().size() < numSlaves) {
+    while(ADMIN.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+        .getServers().size() < numSlaves) {
       Thread.sleep(100);
     }
   }

View File

@@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -44,6 +45,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
@@ -131,14 +133,14 @@ public class TestFavoredStochasticBalancerPickers extends BalancerTestBase {
     TEST_UTIL.getHBaseCluster().startRegionServerAndWait(60000);
     Map<ServerName, List<HRegionInfo>> serverAssignments = Maps.newHashMap();
-    ClusterStatus status = admin.getClusterStatus();
+    ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
     for (ServerName sn : status.getServers()) {
       if (!ServerName.isSameAddress(sn, masterServerName)) {
         serverAssignments.put(sn, admin.getOnlineRegions(sn));
       }
     }
     RegionLocationFinder regionFinder = new RegionLocationFinder();
-    regionFinder.setClusterStatus(admin.getClusterStatus());
+    regionFinder.setClusterStatus(admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)));
     regionFinder.setConf(conf);
     regionFinder.setServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
     Cluster cluster = new Cluster(serverAssignments, null, regionFinder, new RackManager(conf));
@@ -182,7 +184,7 @@ public class TestFavoredStochasticBalancerPickers extends BalancerTestBase {
     int maxRegions = 0;
     ServerName maxLoadedServer = null;
-    for (ServerName sn : admin.getClusterStatus().getServers()) {
+    for (ServerName sn : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
       if (admin.getOnlineRegions(sn).size() > maxRegions) {
         if (excludeNodes == null || !doesMatchExcludeNodes(excludeNodes, sn)) {
           maxRegions = admin.getOnlineRegions(sn).size();

View File

@@ -25,6 +25,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
@@ -151,7 +153,8 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     regions.addAll(admin.getTableRegions(TableName.META_TABLE_NAME));
    regions.addAll(admin.getTableRegions(TableName.NAMESPACE_TABLE_NAME));
-    List<ServerName> servers = Lists.newArrayList(admin.getClusterStatus().getServers());
+    List<ServerName> servers = Lists.newArrayList(
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers());
     Map<ServerName, List<HRegionInfo>> map = balancer.roundRobinAssignment(regions, servers);
     for (List<HRegionInfo> regionInfos : map.values()) {
       regions.removeAll(regionInfos);
@@ -177,10 +180,12 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
       assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favNodes.size());
     }
-    Map<ServerName, List<Integer>> replicaLoadMap =
-        fnm.getReplicaLoad(Lists.newArrayList(admin.getClusterStatus().getServers()));
+    Map<ServerName, List<Integer>> replicaLoadMap = fnm.getReplicaLoad(
+        Lists.newArrayList(admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+            .getServers()));
     assertTrue("Not all replica load collected.",
-        admin.getClusterStatus().getServers().size() == replicaLoadMap.size());
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+            .getServers().size() == replicaLoadMap.size());
     for (Entry<ServerName, List<Integer>> entry : replicaLoadMap.entrySet()) {
       assertTrue(entry.getValue().size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
       assertTrue(entry.getValue().get(0) >= 0);
@@ -190,10 +195,12 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     admin.disableTable(TableName.valueOf(tableName));
     admin.deleteTable(TableName.valueOf(tableName));
-    replicaLoadMap =
-        fnm.getReplicaLoad(Lists.newArrayList(admin.getClusterStatus().getServers()));
+    replicaLoadMap = fnm.getReplicaLoad(Lists.newArrayList(
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()));
     assertTrue("replica load found " + replicaLoadMap.size() + " instead of 0.",
-        replicaLoadMap.size() == admin.getClusterStatus().getServers().size());
+        replicaLoadMap.size() == admin
+            .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()
+            .size());
   }
   @Test
@@ -213,7 +220,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     LoadBalancer balancer = master.getLoadBalancer();
     ServerName destination = balancer.randomAssignment(hri, Lists.newArrayList(admin
-        .getClusterStatus().getServers()));
+        .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()));
     assertNotNull(destination);
     List<ServerName> favoredNodes = fnm.getFavoredNodes(hri);
     assertNotNull(favoredNodes);
@@ -279,7 +286,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     assertNotNull(currentFN);
     List<ServerName> serversForNewFN = Lists.newArrayList();
-    for (ServerName sn : admin.getClusterStatus().getServers()) {
+    for (ServerName sn : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
       serversForNewFN.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE));
     }
     for (ServerName sn : currentFN) {
@@ -379,7 +386,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     // Regenerate FN and assign, everything else should be fine
     List<ServerName> serversForNewFN = Lists.newArrayList();
-    for (ServerName sn : admin.getClusterStatus().getServers()) {
+    for (ServerName sn : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
      serversForNewFN.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE));
    }
@@ -473,7 +480,7 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     // Regenerate FN and assign, everything else should be fine
     List<ServerName> serversForNewFN = Lists.newArrayList();
-    for (ServerName sn : admin.getClusterStatus().getServers()) {
+    for (ServerName sn : admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
       serversForNewFN.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE));
     }

View File

@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -51,6 +52,7 @@ import org.junit.experimental.categories.Category;
 import java.io.IOException;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -93,7 +95,7 @@ public class TestRegionServerReadRequestMetrics {
     TEST_UTIL.getConfiguration().setBoolean(LoadBalancer.SYSTEM_TABLES_ON_MASTER, true);
     TEST_UTIL.startMiniCluster();
     admin = TEST_UTIL.getAdmin();
-    serverNames = admin.getClusterStatus().getServers();
+    serverNames = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers();
     table = createTable();
     putData();
     tableRegions = admin.getTableRegions(TABLE_NAME);
@@ -154,7 +156,7 @@ public class TestRegionServerReadRequestMetrics {
     boolean metricsUpdated = false;
     for (int i = 0; i < MAX_TRY; i++) {
       for (ServerName serverName : serverNames) {
-        serverLoad = admin.getClusterStatus().getLoad(serverName);
+        serverLoad = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getLoad(serverName);
         Map<byte[], RegionLoad> regionsLoad = serverLoad.getRegionsLoad();
         for (HRegionInfo tableRegion : tableRegions) {

View File

@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
@@ -186,7 +188,8 @@ public class TestMasterReplication {
     Waiter.waitFor(baseConfiguration, 10000, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        ClusterStatus clusterStatus = utilities[0].getAdmin().getClusterStatus();
+        ClusterStatus clusterStatus = utilities[0].getAdmin()
+            .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
         ServerLoad serverLoad = clusterStatus.getLoad(rsName);
         List<ReplicationLoadSource> replicationLoadSourceList =
             serverLoad.getReplicationLoadSourceList();
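
The replication tests read ReplicationLoadSource entries out of the scoped status in the same way. A sketch of that lookup (class name illustrative; assumes replication peers are configured, otherwise the list is simply empty):

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.ClusterStatus.Option;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerLoad;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationLoadSource;

    public class ReplicationLoadSketch {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            Admin admin = conn.getAdmin()) {
          ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
          for (ServerName sn : status.getServers()) {
            ServerLoad sl = status.getLoad(sn);
            // One source entry per replication peer this server ships edits to.
            for (ReplicationLoadSource src : sl.getReplicationLoadSourceList()) {
              System.out.println(sn + " peer=" + src.getPeerID()
                  + " logQueueSize=" + src.getSizeOfLogQueue()
                  + " replicationLag=" + src.getReplicationLag());
            }
          }
        }
      }
    }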

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import java.util.EnumSet;
 import java.util.List;
 import org.apache.commons.logging.Log;
@@ -27,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -66,7 +68,7 @@ public class TestReplicationStatus extends TestReplicationBase {
       htable1.put(p);
     }
-    ClusterStatus status = hbaseAdmin.getClusterStatus();
+    ClusterStatus status = hbaseAdmin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
     for (JVMClusterUtil.RegionServerThread thread : utility1.getHBaseCluster()
         .getRegionServerThreads()) {
@@ -89,7 +91,7 @@ public class TestReplicationStatus extends TestReplicationBase {
     // Stop rs1, then the queue of rs1 will be transfered to rs0
     utility1.getHBaseCluster().getRegionServer(1).stop("Stop RegionServer");
     Thread.sleep(10000);
-    status = hbaseAdmin.getClusterStatus();
+    status = hbaseAdmin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
     ServerName server = utility1.getHBaseCluster().getRegionServer(0).getServerName();
     ServerLoad sl = status.getLoad(server);
     List<ReplicationLoadSource> rLoadSourceList = sl.getReplicationLoadSourceList();

@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
@@ -328,7 +330,7 @@ public class BaseTestHBaseFsck {
    * Get region info from local cluster.
    */
   Map<ServerName, List<String>> getDeployedHRIs(final Admin admin) throws IOException {
-    ClusterStatus status = admin.getClusterStatus();
+    ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
     Collection<ServerName> regionServers = status.getServers();
     Map<ServerName, List<String>> mm = new HashMap<>();
     for (ServerName hsi : regionServers) {
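
getDeployedHRIs() combines the scoped status with a per-server region listing. A sketch of that combination (class name illustrative; assumes a reachable cluster):

    import java.io.IOException;
    import java.util.EnumSet;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.ClusterStatus.Option;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeployedRegionsSketch {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            Admin admin = conn.getAdmin()) {
          // Enumerate live servers once, then ask each for its online regions.
          Map<ServerName, List<HRegionInfo>> deployed = new HashMap<>();
          ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
          for (ServerName sn : status.getServers()) {
            deployed.put(sn, admin.getOnlineRegions(sn));
          }
          deployed.forEach((sn, regions) ->
              System.out.println(sn + " hosts " + regions.size() + " regions"));
        }
      }
    }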

@@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
@@ -46,6 +47,7 @@ import org.junit.rules.TestName;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -203,7 +205,7 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck {
       }
     }
     Put put = new Put(metaKey);
-    Collection<ServerName> var = admin.getClusterStatus().getServers();
+    Collection<ServerName> var = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers();
     ServerName sn = var.toArray(new ServerName[var.size()])[0];
     //add a location with replicaId as 2 (since we already have replicas with replicaid 0 and 1)
     MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, 2);
@@ -285,7 +287,8 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck {
       }
     }
     // get all the online regions in the regionservers
-    Collection<ServerName> servers = admin.getClusterStatus().getServers();
+    Collection<ServerName> servers =
+        admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers();
     Set<HRegionInfo> onlineRegions = new HashSet<>();
     for (ServerName s : servers) {
       List<HRegionInfo> list = admin.getOnlineRegions(s);
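
For contrast, a sketch of the two overloads side by side: the no-argument form asks the master to fill in every Option, while the EnumSet form fetches only what is named (class and method names illustrative):

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.ClusterStatus.Option;
    import org.apache.hadoop.hbase.client.Admin;

    public class ScopedVsFullSketch {
      // Fetch everything: the master assembles all Options (servers, masters,
      // coprocessors, balancer state, ...).
      static ClusterStatus full(Admin admin) throws IOException {
        return admin.getClusterStatus();
      }

      // Fetch only what is named: here, just the live-server list.
      static ClusterStatus liveServersOnly(Admin admin) throws IOException {
        return admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
      }
    }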

@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import org.apache.commons.logging.Log;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -152,7 +154,8 @@ public class TestMiniClusterLoadSequential {
         ", isMultiPut=" + isMultiPut);
     numKeys = numKeys();
     Admin admin = TEST_UTIL.getAdmin();
-    while (admin.getClusterStatus().getServers().size() < NUM_RS) {
+    while (admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
+        .getServers().size() < NUM_RS) {
       LOG.info("Sleeping until " + NUM_RS + " RSs are online");
       Threads.sleepWithoutInterrupt(1000);
     }
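
The startup wait above now reduces each poll to the live-server list. The same loop, extracted as a helper (class and method names illustrative; the one-second poll interval mirrors the test):

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.hbase.ClusterStatus.Option;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.util.Threads;

    public class WaitForServersSketch {
      // Block until at least `expected` region servers report in; each poll
      // asks the master for the live-server list only.
      static void waitForRegionServers(Admin admin, int expected) throws IOException {
        while (admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))
            .getServers().size() < expected) {
          Threads.sleepWithoutInterrupt(1000);
        }
      }
    }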