diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java
index ce5054761a7..75a91ba7188 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java
@@ -24,7 +24,6 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -104,11 +103,6 @@ public class DeadNodeDetector implements Runnable {
   private Map probeInProg =
       new ConcurrentHashMap();
 
-  /**
-   * The last time when detect dead node.
-   */
-  private long lastDetectDeadTS = 0;
-
   /**
    * Interval time in milliseconds for probing dead node behavior.
    */
@@ -416,20 +410,15 @@ private void probeCallBack(Probe probe, boolean success) {
    * Check dead node periodically.
    */
   private void checkDeadNodes() {
-    long ts = Time.monotonicNow();
-    if (ts - lastDetectDeadTS > deadNodeDetectInterval) {
-      Set<DatanodeInfo> datanodeInfos = clearAndGetDetectedDeadNodes();
-      for (DatanodeInfo datanodeInfo : datanodeInfos) {
-        LOG.debug("Add dead node to check: {}.", datanodeInfo);
-        if (!deadNodesProbeQueue.offer(datanodeInfo)) {
-          LOG.debug("Skip to add dead node {} to check " +
-              "since the probe queue is full.", datanodeInfo);
-          break;
-        }
+    Set<DatanodeInfo> datanodeInfos = clearAndGetDetectedDeadNodes();
+    for (DatanodeInfo datanodeInfo : datanodeInfos) {
+      LOG.debug("Add dead node to check: {}.", datanodeInfo);
+      if (!deadNodesProbeQueue.offer(datanodeInfo)) {
+        LOG.debug("Skip to add dead node {} to check " +
+            "since the probe queue is full.", datanodeInfo);
+        break;
       }
-      lastDetectDeadTS = ts;
     }
-
     state = State.IDLE;
   }
 
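
Review context, not part of the patch: the deleted lastDetectDeadTS / Time.monotonicNow() check had gated the enqueue loop so dead nodes were re-queued at most once per deadNodeDetectInterval; after the change, every pass through checkDeadNodes() enqueues immediately, and the detector still drops back to IDLE (the retained state = State.IDLE line). A minimal standalone sketch of the before/after behavior, using hypothetical names (ThrottleSketch, detectIntervalMs, probeQueue) rather than the real DeadNodeDetector internals:

import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

/** Hypothetical sketch of the throttle this patch removes; not Hadoop code. */
public class ThrottleSketch {
  // Analogous to lastDetectDeadTS and deadNodeDetectInterval in the patch.
  private long lastRunTs = 0;
  private final long detectIntervalMs = 5000;
  // Bounded queue so offer() can fail, mirroring deadNodesProbeQueue.offer().
  private final BlockingQueue<String> probeQueue = new ArrayBlockingQueue<>(100);

  /** Before the patch: the enqueue loop runs at most once per detectIntervalMs. */
  void checkThrottled(Set<String> deadNodes) {
    long ts = System.nanoTime() / 1_000_000L; // monotonic ms, like Time.monotonicNow()
    if (ts - lastRunTs > detectIntervalMs) {
      enqueue(deadNodes);
      lastRunTs = ts;
    }
  }

  /** After the patch: enqueue on every call; pacing is left to the caller. */
  void checkUnthrottled(Set<String> deadNodes) {
    enqueue(deadNodes);
  }

  private void enqueue(Set<String> deadNodes) {
    for (String node : deadNodes) {
      if (!probeQueue.offer(node)) {
        break; // queue full: stop enqueueing, as the original loop does
      }
    }
  }
}

Assuming the surrounding run loop matches the upstream state machine (CHECK_DEAD invokes checkDeadNodes(), then an IDLE sleep leads back to CHECK_DEAD), the IDLE sleep already paces how often this method runs, which would make the internal timestamp gate redundant; that would also be why the org.apache.hadoop.util.Time import and the field's Javadoc block can be deleted in the first two hunks.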