HBASE-10472 Manage the interruption in ZKUtil#getData

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1565787 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
nkeywal 2014-02-07 20:27:42 +00:00
parent 6f79ebe884
commit 4742456403
26 changed files with 185 additions and 54 deletions

View File

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.client;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -105,6 +106,8 @@ class ZooKeeperRegistry implements Registry {
       return ZKTableReadOnly.isDisabledTable(zkw, tableName);
     } catch (KeeperException e) {
       throw new IOException("Enable/Disable failed", e);
+    } catch (InterruptedException e) {
+      throw new InterruptedIOException();
     } finally {
       zkw.close();
     }

View File

@ -159,6 +159,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
throw new ReplicationException(e); throw new ReplicationException(e);
} catch (DeserializationException e) { } catch (DeserializationException e) {
throw new ReplicationException(e); throw new ReplicationException(e);
} catch (InterruptedException e) {
throw new ReplicationException(e);
} }
} }
@ -212,6 +214,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
} }
} catch (KeeperException e) { } catch (KeeperException e) {
this.abortable.abort("Cannot get the list of peers ", e); this.abortable.abort("Cannot get the list of peers ", e);
} catch (InterruptedException e) {
this.abortable.abort("Cannot get the list of peers ", e);
} }
return peers; return peers;
} }
@ -268,6 +272,11 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
} catch (KeeperException e) { } catch (KeeperException e) {
throw new ReplicationException("Error getting configuration for peer with id=" throw new ReplicationException("Error getting configuration for peer with id="
+ peerId, e); + peerId, e);
} catch (InterruptedException e) {
LOG.warn("Could not get configuration for peer because the thread " +
"was interrupted. peerId=" + peerId);
Thread.currentThread().interrupt();
return null;
} }
if (data == null) { if (data == null) {
LOG.error("Could not get configuration for peer because it doesn't exist. peerId=" + peerId); LOG.error("Could not get configuration for peer because it doesn't exist. peerId=" + peerId);

View File

@ -141,6 +141,9 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
} catch (KeeperException e) { } catch (KeeperException e) {
throw new ReplicationException("Internal Error: could not get position in log for queueId=" throw new ReplicationException("Internal Error: could not get position in log for queueId="
+ queueId + ", filename=" + filename, e); + queueId + ", filename=" + filename, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return 0;
} }
try { try {
return ZKUtil.parseHLogPositionFrom(bytes); return ZKUtil.parseHLogPositionFrom(bytes);
@ -338,6 +341,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
// Multi call failed; it looks like some other regionserver took away the logs. // Multi call failed; it looks like some other regionserver took away the logs.
LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e); LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e);
queues.clear(); queues.clear();
} catch (InterruptedException e) {
LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e);
queues.clear();
Thread.currentThread().interrupt();
} }
return queues; return queues;
} }
@ -403,6 +410,9 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
} }
} catch (KeeperException e) { } catch (KeeperException e) {
this.abortable.abort("Copy queues from rs", e); this.abortable.abort("Copy queues from rs", e);
} catch (InterruptedException e) {
LOG.warn(e);
Thread.currentThread().interrupt();
} }
return queues; return queues;
} }

View File

@@ -29,6 +29,7 @@ import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 /**
  * Manages the location of the current active Master for the RegionServer.
@@ -103,7 +104,12 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker {
    */
   public static ServerName getMasterAddress(final ZooKeeperWatcher zkw)
   throws KeeperException, IOException {
-    byte [] data = ZKUtil.getData(zkw, zkw.getMasterAddressZNode());
+    byte [] data;
+    try {
+      data = ZKUtil.getData(zkw, zkw.getMasterAddressZNode());
+    } catch (InterruptedException e) {
+      throw new InterruptedIOException();
+    }
     if (data == null){
       throw new IOException("Can't get master address from ZooKeeper; znode data == null");
     }

View File

@@ -81,6 +81,9 @@ public class MetaRegionTracker extends ZooKeeperNodeTracker {
       return ServerName.parseFrom(ZKUtil.getData(zkw, zkw.metaServerZNode));
     } catch (DeserializationException e) {
       throw ZKUtil.convert(e);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      return null;
     }
   }

View File

@@ -63,7 +63,13 @@ public class ZKClusterId {
   public static String readClusterIdZNode(ZooKeeperWatcher watcher)
       throws KeeperException {
     if (ZKUtil.checkExists(watcher, watcher.clusterIdZNode) != -1) {
-      byte [] data = ZKUtil.getData(watcher, watcher.clusterIdZNode);
+      byte [] data;
+      try {
+        data = ZKUtil.getData(watcher, watcher.clusterIdZNode);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        return null;
+      }
       if (data != null) {
         try {
           return ClusterId.parseFrom(data).toString();

View File

@ -170,6 +170,10 @@ public class ZKLeaderManager extends ZooKeeperListener {
watcher.abort("Unhandled zookeeper exception removing leader node", ke); watcher.abort("Unhandled zookeeper exception removing leader node", ke);
candidate.stop("Unhandled zookeeper exception removing leader node: " candidate.stop("Unhandled zookeeper exception removing leader node: "
+ ke.getMessage()); + ke.getMessage());
} catch (InterruptedException e) {
watcher.abort("Unhandled zookeeper exception removing leader node", e);
candidate.stop("Unhandled zookeeper exception removing leader node: "
+ e.getMessage());
} }
} }

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException;
import java.io.InterruptedIOException;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
@ -63,7 +64,7 @@ public class ZKTable {
// TODO: Make it so always a table znode. Put table schema here as well as table state. // TODO: Make it so always a table znode. Put table schema here as well as table state.
// Have watcher on table znode so all are notified of state or schema change. // Have watcher on table znode so all are notified of state or schema change.
public ZKTable(final ZooKeeperWatcher zkw) throws KeeperException { public ZKTable(final ZooKeeperWatcher zkw) throws KeeperException, InterruptedException {
super(); super();
this.watcher = zkw; this.watcher = zkw;
populateTableStates(); populateTableStates();
@ -74,7 +75,7 @@ public class ZKTable {
* @throws KeeperException * @throws KeeperException
*/ */
private void populateTableStates() private void populateTableStates()
throws KeeperException { throws KeeperException, InterruptedException {
synchronized (this.cache) { synchronized (this.cache) {
List<String> children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode); List<String> children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode);
if (children == null) return; if (children == null) return;
@ -316,7 +317,7 @@ public class ZKTable {
* @throws KeeperException * @throws KeeperException
*/ */
public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw) public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw)
throws KeeperException { throws KeeperException, InterruptedIOException {
return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED); return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED);
} }
@ -326,7 +327,7 @@ public class ZKTable {
* @throws KeeperException * @throws KeeperException
*/ */
public static Set<TableName> getDisablingTables(ZooKeeperWatcher zkw) public static Set<TableName> getDisablingTables(ZooKeeperWatcher zkw)
throws KeeperException { throws KeeperException, InterruptedIOException {
return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLING); return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLING);
} }
@ -336,7 +337,7 @@ public class ZKTable {
* @throws KeeperException * @throws KeeperException
*/ */
public static Set<TableName> getEnablingTables(ZooKeeperWatcher zkw) public static Set<TableName> getEnablingTables(ZooKeeperWatcher zkw)
throws KeeperException { throws KeeperException, InterruptedIOException {
return getAllTables(zkw, ZooKeeperProtos.Table.State.ENABLING); return getAllTables(zkw, ZooKeeperProtos.Table.State.ENABLING);
} }
@ -346,7 +347,7 @@ public class ZKTable {
* @throws KeeperException * @throws KeeperException
*/ */
public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw) public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
throws KeeperException { throws KeeperException, InterruptedIOException {
return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED, return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED,
ZooKeeperProtos.Table.State.DISABLING); ZooKeeperProtos.Table.State.DISABLING);
} }
@ -380,14 +381,19 @@ public class ZKTable {
* @throws KeeperException * @throws KeeperException
*/ */
static Set<TableName> getAllTables(final ZooKeeperWatcher zkw, static Set<TableName> getAllTables(final ZooKeeperWatcher zkw,
final ZooKeeperProtos.Table.State... states) throws KeeperException { final ZooKeeperProtos.Table.State... states) throws KeeperException, InterruptedIOException {
Set<TableName> allTables = new HashSet<TableName>(); Set<TableName> allTables = new HashSet<TableName>();
List<String> children = List<String> children =
ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
if(children == null) return allTables; if(children == null) return allTables;
for (String child: children) { for (String child: children) {
TableName tableName = TableName.valueOf(child); TableName tableName = TableName.valueOf(child);
ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(zkw, tableName); ZooKeeperProtos.Table.State state = null;
try {
state = ZKTableReadOnly.getTableState(zkw, tableName);
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
for (ZooKeeperProtos.Table.State expectedState: states) { for (ZooKeeperProtos.Table.State expectedState: states) {
if (state == expectedState) { if (state == expectedState) {
allTables.add(tableName); allTables.add(tableName);

View File

@ -55,7 +55,7 @@ public class ZKTableReadOnly {
*/ */
public static boolean isDisabledTable(final ZooKeeperWatcher zkw, public static boolean isDisabledTable(final ZooKeeperWatcher zkw,
final TableName tableName) final TableName tableName)
throws KeeperException { throws KeeperException, InterruptedException {
ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
return isTableState(ZooKeeperProtos.Table.State.DISABLED, state); return isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
} }
@ -71,7 +71,7 @@ public class ZKTableReadOnly {
*/ */
public static boolean isEnabledTable(final ZooKeeperWatcher zkw, public static boolean isEnabledTable(final ZooKeeperWatcher zkw,
final TableName tableName) final TableName tableName)
throws KeeperException { throws KeeperException, InterruptedException {
return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED; return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED;
} }
@ -87,7 +87,7 @@ public class ZKTableReadOnly {
*/ */
public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw, public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw,
final TableName tableName) final TableName tableName)
throws KeeperException { throws KeeperException, InterruptedException {
ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) || return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) ||
isTableState(ZooKeeperProtos.Table.State.DISABLED, state); isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
@ -99,7 +99,7 @@ public class ZKTableReadOnly {
* @throws KeeperException * @throws KeeperException
*/ */
public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw) public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw)
throws KeeperException { throws KeeperException, InterruptedException {
Set<TableName> disabledTables = new HashSet<TableName>(); Set<TableName> disabledTables = new HashSet<TableName>();
List<String> children = List<String> children =
ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
@ -118,7 +118,7 @@ public class ZKTableReadOnly {
* @throws KeeperException * @throws KeeperException
*/ */
public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw) public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
throws KeeperException { throws KeeperException, InterruptedException {
Set<TableName> disabledTables = new HashSet<TableName>(); Set<TableName> disabledTables = new HashSet<TableName>();
List<String> children = List<String> children =
ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
@ -146,7 +146,7 @@ public class ZKTableReadOnly {
*/ */
static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
final TableName tableName) final TableName tableName)
throws KeeperException { throws KeeperException, InterruptedException {
String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
byte [] data = ZKUtil.getData(zkw, znode); byte [] data = ZKUtil.getData(zkw, znode);
if (data == null || data.length <= 0) return null; if (data == null || data.length <= 0) return null;

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent; import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.DeleteNodeFailSilent; import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.DeleteNodeFailSilent;
@@ -678,7 +679,7 @@ public class ZKUtil {
    * error.
    */
   public static byte [] getData(ZooKeeperWatcher zkw, String znode)
-      throws KeeperException {
+      throws KeeperException, InterruptedException {
     try {
       byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, null);
       logRetrievedMsg(zkw, znode, data, false);
@@ -691,10 +692,6 @@ public class ZKUtil {
       LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e);
       zkw.keeperException(e);
       return null;
-    } catch (InterruptedException e) {
-      LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e);
-      zkw.interruptedException(e);
-      return null;
     }
   }
@ -1661,13 +1658,22 @@ public class ZKUtil {
do { do {
String znodeToProcess = stack.remove(stack.size() - 1); String znodeToProcess = stack.remove(stack.size() - 1);
sb.append("\n").append(znodeToProcess).append(": "); sb.append("\n").append(znodeToProcess).append(": ");
byte[] data = ZKUtil.getData(zkw, znodeToProcess); byte[] data;
try {
data = ZKUtil.getData(zkw, znodeToProcess);
} catch (InterruptedException e) {
zkw.interruptedException(e);
return;
}
if (data != null && data.length > 0) { // log position if (data != null && data.length > 0) { // log position
long position = 0; long position = 0;
try { try {
position = ZKUtil.parseHLogPositionFrom(ZKUtil.getData(zkw, znodeToProcess)); position = ZKUtil.parseHLogPositionFrom(ZKUtil.getData(zkw, znodeToProcess));
sb.append(position); sb.append(position);
} catch (Exception e) { } catch (DeserializationException ignored) {
} catch (InterruptedException e) {
zkw.interruptedException(e);
return;
} }
} }
for (String zNodeChild : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) { for (String zNodeChild : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) {
@ -1682,7 +1688,13 @@ public class ZKUtil {
sb.append("\n").append(peersZnode).append(": "); sb.append("\n").append(peersZnode).append(": ");
for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, peersZnode)) { for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, peersZnode)) {
String znodeToProcess = ZKUtil.joinZNode(peersZnode, peerIdZnode); String znodeToProcess = ZKUtil.joinZNode(peersZnode, peerIdZnode);
byte[] data = ZKUtil.getData(zkw, znodeToProcess); byte[] data;
try {
data = ZKUtil.getData(zkw, znodeToProcess);
} catch (InterruptedException e) {
zkw.interruptedException(e);
return;
}
// parse the data of the above peer znode. // parse the data of the above peer znode.
try { try {
String clusterKey = ZooKeeperProtos.ReplicationPeer.newBuilder(). String clusterKey = ZooKeeperProtos.ReplicationPeer.newBuilder().
@ -1705,9 +1717,15 @@ public class ZKUtil {
if (!child.equals(peerState)) continue; if (!child.equals(peerState)) continue;
String peerStateZnode = ZKUtil.joinZNode(znodeToProcess, child); String peerStateZnode = ZKUtil.joinZNode(znodeToProcess, child);
sb.append("\n").append(peerStateZnode).append(": "); sb.append("\n").append(peerStateZnode).append(": ");
byte[] peerStateData = ZKUtil.getData(zkw, peerStateZnode); byte[] peerStateData;
try {
peerStateData = ZKUtil.getData(zkw, peerStateZnode);
sb.append(ZooKeeperProtos.ReplicationState.newBuilder() sb.append(ZooKeeperProtos.ReplicationState.newBuilder()
.mergeFrom(peerStateData, pblen, peerStateData.length - pblen).getState().name()); .mergeFrom(peerStateData, pblen, peerStateData.length - pblen).getState().name());
} catch (InterruptedException e) {
zkw.interruptedException(e);
return;
}
} }
} }

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.master; package org.apache.hadoop.hbase.master;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
@ -282,7 +283,11 @@ public class AssignmentManager extends ZooKeeperListener {
this.timeoutMonitor = null; this.timeoutMonitor = null;
this.timerUpdater = null; this.timerUpdater = null;
} }
try {
this.zkTable = new ZKTable(this.watcher); this.zkTable = new ZKTable(this.watcher);
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
// This is the max attempts, not retries, so it should be at least 1. // This is the max attempts, not retries, so it should be at least 1.
this.maximumAttempts = Math.max(1, this.maximumAttempts = Math.max(1,
this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10)); this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.master; package org.apache.hadoop.hbase.master;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor; import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException; import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress; import java.net.InetAddress;
@ -1426,7 +1427,7 @@ MasterServices, Server {
return balancerCutoffTime; return balancerCutoffTime;
} }
public boolean balance() throws HBaseIOException { public boolean balance() throws IOException {
// if master not initialized, don't run balancer. // if master not initialized, don't run balancer.
if (!this.initialized) { if (!this.initialized) {
LOG.debug("Master has not been initialized, don't run balancer."); LOG.debug("Master has not been initialized, don't run balancer.");
@ -1513,7 +1514,7 @@ MasterServices, Server {
public BalanceResponse balance(RpcController c, BalanceRequest request) throws ServiceException { public BalanceResponse balance(RpcController c, BalanceRequest request) throws ServiceException {
try { try {
return BalanceResponse.newBuilder().setBalancerRan(balance()).build(); return BalanceResponse.newBuilder().setBalancerRan(balance()).build();
} catch (HBaseIOException ex) { } catch (IOException ex) {
throw new ServiceException(ex); throw new ServiceException(ex);
} }
} }
@@ -2078,14 +2079,18 @@ MasterServices, Server {
       GetClusterStatusRequest req)
   throws ServiceException {
     GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
+    try {
       response.setClusterStatus(getClusterStatus().convert());
+    } catch (InterruptedIOException e) {
+      throw new ServiceException(e);
+    }
     return response.build();
   }

   /**
    * @return cluster status
    */
-  public ClusterStatus getClusterStatus() {
+  public ClusterStatus getClusterStatus() throws InterruptedIOException {
     // Build Set of backup masters from ZK nodes
     List<String> backupMasterStrings;
     try {
@@ -2099,9 +2104,13 @@ MasterServices, Server {
           backupMasterStrings.size());
       for (String s: backupMasterStrings) {
         try {
-          byte [] bytes =
-            ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
-              this.zooKeeper.backupMasterAddressesZNode, s));
+          byte [] bytes;
+          try {
+            bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
+                this.zooKeeper.backupMasterAddressesZNode, s));
+          } catch (InterruptedException e) {
+            throw new InterruptedIOException();
+          }
           if (bytes != null) {
             ServerName sn;
             try {

View File

@ -390,7 +390,7 @@ public class MasterFileSystem {
* @throws KeeperException * @throws KeeperException
*/ */
void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers) void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers)
throws KeeperException { throws KeeperException, InterruptedIOException {
this.splitLogManager.removeStaleRecoveringRegionsFromZK(failedServers); this.splitLogManager.removeStaleRecoveringRegionsFromZK(failedServers);
} }

View File

@ -25,6 +25,7 @@ import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.I
import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.SUCCESS; import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.SUCCESS;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
@ -569,7 +570,7 @@ public class SplitLogManager extends ZooKeeperListener {
* @throws KeeperException * @throws KeeperException
*/ */
void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers) void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers)
throws KeeperException { throws KeeperException, InterruptedIOException {
if (!this.distributedLogReplay) { if (!this.distributedLogReplay) {
// remove any regions in recovery from ZK which could happen when we turn the feature on // remove any regions in recovery from ZK which could happen when we turn the feature on
@ -591,7 +592,12 @@ public class SplitLogManager extends ZooKeeperListener {
List<String> tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.splitLogZNode); List<String> tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.splitLogZNode);
if (tasks != null) { if (tasks != null) {
for (String t : tasks) { for (String t : tasks) {
byte[] data = ZKUtil.getData(this.watcher, ZKUtil.joinZNode(watcher.splitLogZNode, t)); byte[] data;
try {
data = ZKUtil.getData(this.watcher, ZKUtil.joinZNode(watcher.splitLogZNode, t));
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
if (data != null) { if (data != null) {
SplitLogTask slt = null; SplitLogTask slt = null;
try { try {
@ -1115,7 +1121,7 @@ public class SplitLogManager extends ZooKeeperListener {
* @param userRegions user regiones assigned on the region server * @param userRegions user regiones assigned on the region server
*/ */
void markRegionsRecoveringInZK(final ServerName serverName, Set<HRegionInfo> userRegions) void markRegionsRecoveringInZK(final ServerName serverName, Set<HRegionInfo> userRegions)
throws KeeperException { throws KeeperException, InterruptedIOException {
if (userRegions == null || !this.distributedLogReplay) { if (userRegions == null || !this.distributedLogReplay) {
return; return;
} }
@@ -1172,9 +1178,11 @@ public class SplitLogManager extends ZooKeeperListener {
         // wait a little bit for retry
         try {
           Thread.sleep(20);
-        } catch (Exception ignoreE) {
-          // ignore
+        } catch (InterruptedException e1) {
+          throw new InterruptedIOException();
         }
+      } catch (InterruptedException e) {
+        throw new InterruptedIOException();
       }
     } while ((--retries) > 0 && (!this.stopper.isStopped()));
   }
} }
@ -1240,7 +1248,12 @@ public class SplitLogManager extends ZooKeeperListener {
String nodePath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, encodedRegionName); String nodePath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, encodedRegionName);
nodePath = ZKUtil.joinZNode(nodePath, serverName); nodePath = ZKUtil.joinZNode(nodePath, serverName);
try { try {
byte[] data = ZKUtil.getData(zkw, nodePath); byte[] data;
try {
data = ZKUtil.getData(zkw, nodePath);
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
if (data != null) { if (data != null) {
result = ZKUtil.parseRegionStoreSequenceIds(data); result = ZKUtil.parseRegionStoreSequenceIds(data);
} }

View File

@ -25,6 +25,8 @@ import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.HMaster;
import java.io.IOException;
/** /**
* Chore that will call HMaster.balance{@link org.apache.hadoop.hbase.master.HMaster#balance()} when * Chore that will call HMaster.balance{@link org.apache.hadoop.hbase.master.HMaster#balance()} when
* needed. * needed.
@ -46,7 +48,7 @@ public class BalancerChore extends Chore {
protected void chore() { protected void chore() {
try { try {
master.balance(); master.balance();
} catch (HBaseIOException e) { } catch (IOException e) {
LOG.error("Failed to balance.", e); LOG.error("Failed to balance.", e);
} }
} }

View File

@ -18,17 +18,21 @@
package org.apache.hadoop.hbase.master.balancer; package org.apache.hadoop.hbase.master.balancer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.LoadBalancer;
import java.io.InterruptedIOException;
/** /**
* Chore that will feed the balancer the cluster status. * Chore that will feed the balancer the cluster status.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class ClusterStatusChore extends Chore { public class ClusterStatusChore extends Chore {
private static final Log LOG = LogFactory.getLog(ClusterStatusChore.class);
private final HMaster master; private final HMaster master;
private final LoadBalancer balancer; private final LoadBalancer balancer;
@ -42,6 +46,10 @@ public class ClusterStatusChore extends Chore {
@Override @Override
protected void chore() { protected void chore() {
try {
balancer.setClusterStatus(master.getClusterStatus()); balancer.setClusterStatus(master.getClusterStatus());
} catch (InterruptedIOException e) {
LOG.warn("Ignoring interruption", e);
}
} }
} }

View File

@ -250,6 +250,10 @@ public class ZKProcedureCoordinatorRpcs implements ProcedureCoordinatorRpcs {
} catch (KeeperException e) { } catch (KeeperException e) {
coordinator.rpcConnectionFailure("Failed to get data for abort node:" + abortNode coordinator.rpcConnectionFailure("Failed to get data for abort node:" + abortNode
+ zkProc.getAbortZnode(), new IOException(e)); + zkProc.getAbortZnode(), new IOException(e));
} catch (InterruptedException e) {
coordinator.rpcConnectionFailure("Failed to get data for abort node:" + abortNode
+ zkProc.getAbortZnode(), new IOException(e));
Thread.currentThread().interrupt();
} }
coordinator.abortProcedure(procName, ee); coordinator.abortProcedure(procName, ee);
} }

View File

@ -221,6 +221,10 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
} catch (KeeperException e) { } catch (KeeperException e) {
member.controllerConnectionFailure("Failed to get data for new procedure:" + opName, member.controllerConnectionFailure("Failed to get data for new procedure:" + opName,
new IOException(e)); new IOException(e));
} catch (InterruptedException e) {
member.controllerConnectionFailure("Failed to get data for new procedure:" + opName,
new IOException(e));
Thread.currentThread().interrupt();
} }
} }
@ -330,6 +334,9 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
} catch (KeeperException e) { } catch (KeeperException e) {
member.controllerConnectionFailure("Failed to get data for abort znode:" + abortZNode member.controllerConnectionFailure("Failed to get data for abort znode:" + abortZNode
+ zkController.getAbortZnode(), new IOException(e)); + zkController.getAbortZnode(), new IOException(e));
} catch (InterruptedException e) {
LOG.warn("abort already in progress", e);
Thread.currentThread().interrupt();
} }
} }

View File

@ -4513,7 +4513,12 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
String nodePath = ZKUtil.joinZNode(this.zooKeeper.recoveringRegionsZNode, String nodePath = ZKUtil.joinZNode(this.zooKeeper.recoveringRegionsZNode,
region.getEncodedName()); region.getEncodedName());
// recovering-region level // recovering-region level
byte[] data = ZKUtil.getData(zkw, nodePath); byte[] data;
try {
data = ZKUtil.getData(zkw, nodePath);
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
if (data != null) { if (data != null) {
lastRecordedFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data); lastRecordedFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data);
} }

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.PrintWriter; import java.io.PrintWriter;
import java.io.StringWriter; import java.io.StringWriter;
import java.net.URI; import java.net.URI;
@ -1384,6 +1385,8 @@ public class HBaseFsck extends Configured {
} }
} catch (KeeperException ke) { } catch (KeeperException ke) {
throw new IOException(ke); throw new IOException(ke);
} catch (InterruptedException e) {
throw new InterruptedIOException();
} finally { } finally {
zkw.close(); zkw.close();
} }

View File

@ -131,7 +131,8 @@ public class ZKDataMigrator extends Configured implements Tool {
return 0; return 0;
} }
private void checkAndMigrateTableStatesToPB(ZooKeeperWatcher zkw) throws KeeperException { private void checkAndMigrateTableStatesToPB(ZooKeeperWatcher zkw) throws KeeperException,
InterruptedException {
List<String> tables = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); List<String> tables = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
if (tables == null) { if (tables == null) {
LOG.info("No table present to migrate table state to PB. returning.."); LOG.info("No table present to migrate table state to PB. returning..");
@ -154,7 +155,8 @@ public class ZKDataMigrator extends Configured implements Tool {
} }
} }
private void checkAndMigrateReplicationNodesToPB(ZooKeeperWatcher zkw) throws KeeperException { private void checkAndMigrateReplicationNodesToPB(ZooKeeperWatcher zkw) throws KeeperException,
InterruptedException {
String replicationZnodeName = getConf().get("zookeeper.znode.replication", "replication"); String replicationZnodeName = getConf().get("zookeeper.znode.replication", "replication");
String replicationPath = ZKUtil.joinZNode(zkw.baseZNode, replicationZnodeName); String replicationPath = ZKUtil.joinZNode(zkw.baseZNode, replicationZnodeName);
List<String> replicationZnodes = ZKUtil.listChildrenNoWatch(zkw, replicationPath); List<String> replicationZnodes = ZKUtil.listChildrenNoWatch(zkw, replicationPath);
@ -185,7 +187,7 @@ public class ZKDataMigrator extends Configured implements Tool {
} }
private void checkAndMigrateQueuesToPB(ZooKeeperWatcher zkw, String znode, String rs) private void checkAndMigrateQueuesToPB(ZooKeeperWatcher zkw, String znode, String rs)
throws KeeperException, NoNodeException { throws KeeperException, NoNodeException, InterruptedException {
String rsPath = ZKUtil.joinZNode(znode, rs); String rsPath = ZKUtil.joinZNode(znode, rs);
List<String> peers = ZKUtil.listChildrenNoWatch(zkw, rsPath); List<String> peers = ZKUtil.listChildrenNoWatch(zkw, rsPath);
if (peers == null || peers.isEmpty()) return; if (peers == null || peers.isEmpty()) return;
@ -207,7 +209,7 @@ public class ZKDataMigrator extends Configured implements Tool {
} }
private void checkAndMigratePeerZnodesToPB(ZooKeeperWatcher zkw, String znode, private void checkAndMigratePeerZnodesToPB(ZooKeeperWatcher zkw, String znode,
List<String> peers) throws KeeperException, NoNodeException { List<String> peers) throws KeeperException, NoNodeException, InterruptedException {
for (String peer : peers) { for (String peer : peers) {
String peerZnode = ZKUtil.joinZNode(znode, peer); String peerZnode = ZKUtil.joinZNode(znode, peer);
byte[] data = ZKUtil.getData(zkw, peerZnode); byte[] data = ZKUtil.getData(zkw, peerZnode);

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.zookeeper; package org.apache.hadoop.hbase.zookeeper;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.NavigableMap; import java.util.NavigableMap;
@ -98,6 +99,8 @@ public class RegionServerTracker extends ZooKeeperListener {
LOG.warn("Get Rs info port from ephemeral node", e); LOG.warn("Get Rs info port from ephemeral node", e);
} catch (IOException e) { } catch (IOException e) {
LOG.warn("Illegal data from ephemeral node", e); LOG.warn("Illegal data from ephemeral node", e);
} catch (InterruptedException e) {
throw new InterruptedIOException();
} }
this.regionServers.put(sn, rsInfoBuilder.build()); this.regionServers.put(sn, rsInfoBuilder.build());
} }

View File

@ -362,6 +362,10 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock {
} catch (KeeperException ex) { } catch (KeeperException ex) {
LOG.warn("Error processing lock metadata in " + lockZNode); LOG.warn("Error processing lock metadata in " + lockZNode);
return false; return false;
} catch (InterruptedException e) {
LOG.warn("InterruptedException processing lock metadata in " + lockZNode);
Thread.currentThread().interrupt();
return false;
} }
return true; return true;
} }

View File

@ -476,7 +476,7 @@ public class TestRegionPlacement {
* Verify the number of region movement is expected * Verify the number of region movement is expected
*/ */
private void verifyRegionMovementNum(int expected) private void verifyRegionMovementNum(int expected)
throws InterruptedException, HBaseIOException { throws InterruptedException, IOException {
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
HMaster m = cluster.getMaster(); HMaster m = cluster.getMaster();
int lastRegionOpenedCount = m.assignmentManager.getNumRegionsOpened(); int lastRegionOpenedCount = m.assignmentManager.getNumRegionsOpened();

View File

@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
@ -1255,7 +1256,7 @@ public class TestSplitTransactionOnCluster {
} }
} }
private void waitUntilRegionServerDead() throws InterruptedException { private void waitUntilRegionServerDead() throws InterruptedException, InterruptedIOException {
// Wait until the master processes the RS shutdown // Wait until the master processes the RS shutdown
for (int i=0; cluster.getMaster().getClusterStatus(). for (int i=0; cluster.getMaster().getClusterStatus().
getServers().size() == NB_SERVERS && i<100; i++) { getServers().size() == NB_SERVERS && i<100; i++) {

View File

@ -50,7 +50,7 @@ public class TestZKTable {
@Test @Test
public void testTableStates() public void testTableStates()
throws ZooKeeperConnectionException, IOException, KeeperException { throws ZooKeeperConnectionException, IOException, KeeperException, InterruptedException {
final TableName name = final TableName name =
TableName.valueOf("testDisabled"); TableName.valueOf("testDisabled");
Abortable abortable = new Abortable() { Abortable abortable = new Abortable() {