diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fb30323a59e..8ae1f506f49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2430,7 +2430,7 @@ public class BlockManager implements BlockStatsMXBean {
     return providedStorageMap.getCapacity();
   }
 
-  public void updateHeartbeat(DatanodeDescriptor node, StorageReport[] reports,
+  void updateHeartbeat(DatanodeDescriptor node, StorageReport[] reports,
       long cacheCapacity, long cacheUsed, int xceiverCount, int failedVolumes,
       VolumeFailureSummary volumeFailureSummary) {
 
@@ -2441,6 +2441,17 @@ public class BlockManager implements BlockStatsMXBean {
         failedVolumes, volumeFailureSummary);
   }
 
+  void updateHeartbeatState(DatanodeDescriptor node,
+      StorageReport[] reports, long cacheCapacity, long cacheUsed,
+      int xceiverCount, int failedVolumes,
+      VolumeFailureSummary volumeFailureSummary) {
+    for (StorageReport report: reports) {
+      providedStorageMap.updateStorage(node, report.getStorage());
+    }
+    node.updateHeartbeatState(reports, cacheCapacity, cacheUsed, xceiverCount,
+        failedVolumes, volumeFailureSummary);
+  }
+
   /**
    * StatefulBlockInfo is used to build the "toUC" list, which is a list of
    * updates to the information about under-construction blocks.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 16ffb4346d5..46a4a7e1080 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -373,7 +373,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Updates stats from datanode heartbeat.
    */
-  public void updateHeartbeat(StorageReport[] reports, long cacheCapacity,
+  void updateHeartbeat(StorageReport[] reports, long cacheCapacity,
       long cacheUsed, int xceiverCount, int volFailures,
       VolumeFailureSummary volumeFailureSummary) {
     updateHeartbeatState(reports, cacheCapacity, cacheUsed, xceiverCount,
@@ -384,7 +384,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * process datanode heartbeat or stats initialization.
    */
-  public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
+  void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
       long cacheUsed, int xceiverCount, int volFailures,
       VolumeFailureSummary volumeFailureSummary) {
     updateStorageStats(reports, cacheCapacity, cacheUsed, xceiverCount,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index 1972a6dcfac..d2c279f8b4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -251,7 +251,7 @@ class HeartbeatManager implements DatanodeStatistics {
     // updateHeartbeat, because we don't want to modify the
     // heartbeatedSinceRegistration flag. Arrival of a lifeline message does
     // not count as arrival of the first heartbeat.
-    node.updateHeartbeatState(reports, cacheCapacity, cacheUsed,
+    blockManager.updateHeartbeatState(node, reports, cacheCapacity, cacheUsed,
         xceiverCount, failedVolumes, volumeFailureSummary);
     stats.add(node);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
index 28427bce75a..14134e697db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LIFELINE_INTERVAL_SECONDS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
@@ -196,6 +197,10 @@ public class TestDataNodeLifeline {
           namesystem.getNumDeadDataNodes());
       assertEquals("Expect DataNode not marked stale due to lifeline.", 0,
           namesystem.getNumStaleDataNodes());
+      // add a new volume on the next heartbeat
+      cluster.getDataNodes().get(0).reconfigurePropertyImpl(
+          DFS_DATANODE_DATA_DIR_KEY,
+          cluster.getDataDirectory().concat("/data-new"));
     }
 
     // Verify that we did in fact call the lifeline RPC.