diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 184b7434583..7f903b69222 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2176,6 +2176,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9302. WebHDFS throws NullPointerException if newLength is not
     provided. (Jagadesh Kiran N via yliu)
 
+    HDFS-9297. Decommissioned capacity should not be considered for
+    configured/used capacity (Contributed by Kuhu Shukla)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 3ab0d5cb7a7..4c39c41fb5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -45,19 +45,20 @@ class DatanodeStats {
   private int expiredHeartbeats = 0;
 
   synchronized void add(final DatanodeDescriptor node) {
-    capacityUsed += node.getDfsUsed();
-    blockPoolUsed += node.getBlockPoolUsed();
     xceiverCount += node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed += node.getDfsUsed();
+      blockPoolUsed += node.getBlockPoolUsed();
       nodesInService++;
       nodesInServiceXceiverCount += node.getXceiverCount();
       capacityTotal += node.getCapacity();
       capacityRemaining += node.getRemaining();
-    } else {
-      capacityTotal += node.getDfsUsed();
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
     }
-    cacheCapacity += node.getCacheCapacity();
-    cacheUsed += node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.addStorage(storageInfo, node);
@@ -69,19 +70,20 @@ class DatanodeStats {
   }
 
   synchronized void subtract(final DatanodeDescriptor node) {
-    capacityUsed -= node.getDfsUsed();
-    blockPoolUsed -= node.getBlockPoolUsed();
     xceiverCount -= node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed -= node.getDfsUsed();
+      blockPoolUsed -= node.getBlockPoolUsed();
       nodesInService--;
       nodesInServiceXceiverCount -= node.getXceiverCount();
       capacityTotal -= node.getCapacity();
       capacityRemaining -= node.getRemaining();
-    } else {
-      capacityTotal -= node.getDfsUsed();
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
     }
-    cacheCapacity -= node.getCacheCapacity();
-    cacheUsed -= node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.subtractStorage(storageInfo, node);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 3ae9e25b036..d648bca4630 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -350,14 +351,13 @@ public class TestDecommission {
     for (int i = 0; i < 10; i++) {
       long[] newStats = namenode.getRpcServer().getStats();
 
-      // For decommissioning nodes, ensure capacity of the DN is no longer
-      // counted. Only used space of the DN is counted in cluster capacity
+      // For decommissioning nodes, ensure capacity of the DN and dfsUsed
+      // are no longer counted towards the total
       assertEquals(newStats[0],
-          decommissioning ? info.getDfsUsed() : info.getCapacity());
+          decommissioning ? 0 : info.getCapacity());
 
-      // Ensure cluster used capacity is counted for both normal and
-      // decommissioning nodes
-      assertEquals(newStats[1], info.getDfsUsed());
+      // Ensure cluster used capacity is counted for normal nodes only
+      assertEquals(newStats[1], decommissioning ? 0 : info.getDfsUsed());
 
       // For decommissioning nodes, remaining space from the DN is not counted
       assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
@@ -1264,4 +1264,39 @@ public class TestDecommission {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testUsedCapacity() throws Exception {
+    int numNamenodes = 1;
+    int numDatanodes = 2;
+
+    startCluster(numNamenodes, numDatanodes, conf);
+    cluster.waitActive();
+    FSNamesystem ns = cluster.getNamesystem(0);
+    BlockManager blockManager = ns.getBlockManager();
+    DatanodeStatistics datanodeStatistics = blockManager.getDatanodeManager()
+        .getDatanodeStatistics();
+
+    long initialUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long initialTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+        new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
+    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
+    ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
+    // decommission one node
+    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
+        AdminStates.DECOMMISSIONED);
+    decommissionedNodes.add(decomNode);
+    long newUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long newTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+
+    assertTrue("DfsUsedCapacity should not be the same after a node has "
+        + "been decommissioned!", initialUsedCapacity != newUsedCapacity);
+    assertTrue("TotalCapacity should not be the same after a node has "
+        + "been decommissioned!", initialTotalCapacity != newTotalCapacity);
+    assertTrue("BlockPoolUsed should not be the same after a node has "
+        + "been decommissioned!", initialBlockPoolUsed != newBlockPoolUsed);
+  }
 }
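
The accounting rule this patch introduces is easiest to see in isolation. Below is a minimal, self-contained sketch of the new aggregation logic; CapacityModel, NodeState, and Node are hypothetical stand-ins for DatanodeStats and DatanodeDescriptor, not Hadoop APIs, and the sketch omits fields the real class also tracks (xceiver counts, nodesInService, per-storage-type stats).

// Hypothetical stand-alone model of the rule in DatanodeStats.add() above;
// not Hadoop code.
import java.util.ArrayList;
import java.util.List;

public class CapacityModel {
  enum NodeState { IN_SERVICE, DECOMMISSION_IN_PROGRESS, DECOMMISSIONED }

  static final class Node {
    final NodeState state;
    final long capacity, dfsUsed, remaining, cacheCapacity, cacheUsed;
    Node(NodeState state, long capacity, long dfsUsed, long remaining,
        long cacheCapacity, long cacheUsed) {
      this.state = state;
      this.capacity = capacity;
      this.dfsUsed = dfsUsed;
      this.remaining = remaining;
      this.cacheCapacity = cacheCapacity;
      this.cacheUsed = cacheUsed;
    }
  }

  long capacityTotal, capacityUsed, capacityRemaining, cacheCapacity, cacheUsed;

  // Disk stats count only for in-service nodes; cache stats also count while
  // decommissioning is still in progress (the node keeps serving cached
  // reads); a fully decommissioned node contributes nothing.
  void add(Node n) {
    if (n.state == NodeState.IN_SERVICE) {
      capacityTotal += n.capacity;
      capacityUsed += n.dfsUsed;
      capacityRemaining += n.remaining;
      cacheCapacity += n.cacheCapacity;
      cacheUsed += n.cacheUsed;
    } else if (n.state == NodeState.DECOMMISSION_IN_PROGRESS) {
      cacheCapacity += n.cacheCapacity;
      cacheUsed += n.cacheUsed;
    }
  }

  public static void main(String[] args) {
    List<Node> nodes = new ArrayList<>();
    nodes.add(new Node(NodeState.IN_SERVICE, 100L, 40L, 60L, 10L, 2L));
    nodes.add(new Node(NodeState.DECOMMISSIONED, 100L, 40L, 60L, 10L, 2L));

    CapacityModel stats = new CapacityModel();
    for (Node n : nodes) {
      stats.add(n);
    }
    // Before the patch, the decommissioned node's dfsUsed (40) leaked into
    // both totals; under the new rule only the in-service node is counted,
    // so this prints "total=100 used=40".
    System.out.println("total=" + stats.capacityTotal
        + " used=" + stats.capacityUsed);
  }
}

The subtract() path applies the same rule in reverse, keeping add/subtract symmetric so that the aggregate totals return to their prior values when a node is removed from the cluster stats.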