HBASE-12844 ServerManager.isServerReachable() should sleep between retries
parent 7e872a81a9
commit 51575197de
ServerManager.java

@@ -63,6 +63,8 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;

@@ -141,6 +143,8 @@ public class ServerManager {
   private final long maxSkew;
   private final long warningSkew;
 
+  private final RetryCounterFactory pingRetryCounterFactory;
+
   /**
    * Set of region servers which are dead but not processed immediately. If one
    * server died before master enables ServerShutdownHandler, the server will be

@@ -199,6 +203,11 @@ public class ServerManager {
     maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
     warningSkew = c.getLong("hbase.master.warningclockskew", 10000);
     this.connection = connect ? (ClusterConnection)ConnectionFactory.createConnection(c) : null;
+    int pingMaxAttempts = Math.max(1, master.getConfiguration().getInt(
+      "hbase.master.maximum.ping.server.attempts", 10));
+    int pingSleepInterval = Math.max(1, master.getConfiguration().getInt(
+      "hbase.master.ping.server.retry.sleep.interval", 100));
+    this.pingRetryCounterFactory = new RetryCounterFactory(pingMaxAttempts, pingSleepInterval);
   }
 
   /**

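The two keys introduced here default to 10 ping attempts with a 100 ms sleep between them, each clamped to at least 1. A minimal standalone sketch of how such a RetryCounterFactory is assembled from configuration follows; the class name and the printed output are illustrative only and not part of this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.RetryCounterFactory;

    public class PingRetryFactorySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Same keys and defaults as the constructor change above; Math.max(1, ...)
        // keeps a non-positive setting from disabling the ping entirely.
        int pingMaxAttempts = Math.max(1,
            conf.getInt("hbase.master.maximum.ping.server.attempts", 10));
        int pingSleepInterval = Math.max(1,
            conf.getInt("hbase.master.ping.server.retry.sleep.interval", 100));

        // One factory lives for the master's lifetime; each reachability check
        // later asks it for a fresh RetryCounter.
        RetryCounterFactory pingRetryCounterFactory =
            new RetryCounterFactory(pingMaxAttempts, pingSleepInterval);
        System.out.println("ping attempts=" + pingMaxAttempts
            + ", sleep interval=" + pingSleepInterval + " ms"
            + ", factory=" + pingRetryCounterFactory);
      }
    }
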
@@ -793,9 +802,9 @@ public class ServerManager {
    */
   public boolean isServerReachable(ServerName server) {
     if (server == null) throw new NullPointerException("Passed server is null");
-    int maximumAttempts = Math.max(1, master.getConfiguration().getInt(
-      "hbase.master.maximum.ping.server.attempts", 10));
-    for (int i = 0; i < maximumAttempts; i++) {
+
+    RetryCounter retryCounter = pingRetryCounterFactory.create();
+    while (retryCounter.shouldRetry()) {
       try {
         AdminService.BlockingInterface admin = getRsAdmin(server);
         if (admin != null) {

@@ -804,8 +813,13 @@ public class ServerManager {
             && server.getStartcode() == info.getServerName().getStartCode();
         }
       } catch (IOException ioe) {
-        LOG.debug("Couldn't reach " + server + ", try=" + i
-          + " of " + maximumAttempts, ioe);
+        LOG.debug("Couldn't reach " + server + ", try=" + retryCounter.getAttemptTimes()
+          + " of " + retryCounter.getMaxAttempts(), ioe);
+        try {
+          retryCounter.sleepUntilNextRetry();
+        } catch(InterruptedException ie) {
+          Thread.currentThread().interrupt();
+        }
       }
     }
     return false;

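Taken together, the two hunks above replace the tight for-loop with the usual RetryCounter pattern: create one counter per call, attempt the ping, and on failure sleep for the configured interval before retrying. A self-contained sketch of that pattern is below; pingOnce is a hypothetical stand-in for the getRsAdmin(server).getServerInfo(...) RPC and always fails, so running it simply exercises the sleep-and-retry path.

    import java.io.IOException;

    import org.apache.hadoop.hbase.util.RetryCounter;
    import org.apache.hadoop.hbase.util.RetryCounterFactory;

    public class RetryPingSketch {

      // Hypothetical stand-in for the AdminService ping used in the patch.
      static boolean pingOnce(String server) throws IOException {
        throw new IOException("server " + server + " not reachable yet");
      }

      static boolean isReachable(String server, RetryCounterFactory factory) {
        RetryCounter retryCounter = factory.create();
        while (retryCounter.shouldRetry()) {
          try {
            return pingOnce(server);              // success ends the loop at once
          } catch (IOException ioe) {
            try {
              retryCounter.sleepUntilNextRetry(); // sleep between attempts, as in the patch
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
            }
          }
        }
        return false;                             // all attempts exhausted
      }

      public static void main(String[] args) {
        // 3 attempts, 100 ms apart: a scaled-down version of the new defaults.
        RetryCounterFactory factory = new RetryCounterFactory(3, 100);
        System.out.println(isReachable("example-rs,16020,0", factory));
      }
    }
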
TestAssignmentManagerOnCluster.java

@@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;

@@ -98,6 +97,8 @@ public class TestAssignmentManagerOnCluster {
       MyRegionObserver.class, RegionObserver.class);
     // Reduce the maximum attempts to speed up the test
     conf.setInt("hbase.assignment.maximum.attempts", 3);
+    conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
+    conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);
 
     TEST_UTIL.startMiniCluster(1, 4, null, MyMaster.class, MyRegionServer.class);
     admin = TEST_UTIL.getHBaseAdmin();
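As the existing comment in this setup method says, the lower values here (3 attempts, 1 ms sleep) exist only to keep TestAssignmentManagerOnCluster fast now that the master sleeps between ping retries; outside the test, the 10-attempt / 100 ms defaults introduced above apply.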