diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 44774a7c8c8..af7118a431b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1378,6 +1378,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9063. Correctly handle snapshot path for getContentSummary. (jing9)
+ HDFS-8780. Fetching live/dead datanode list with arg true for
+ removeDecommissionNode returns list with decom node. (J.Andreina via vinayakumab)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 31149372bb5..a484fccf9c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -788,45 +788,16 @@ public class DatanodeManager {
}
/**
- * Remove an already decommissioned data node that is neither in the include
- * nor the exclude hosts lists from the list of live or dead nodes. This is
- * used to not display an already decommissioned data node to the operators.
- * The procedure for making an already decommissioned data node not be
- * displayed is as follows:
- *
- * - Host must have been in the include hosts list and the include hosts
- *   list must not be empty.
- *
- * - Host is decommissioned by remaining in the include hosts list and
- *   added into the exclude hosts list. Name node is updated with the new
- *   information by issuing the dfsadmin -refreshNodes command.
- *
- * - Host is removed from both the include hosts and exclude hosts lists.
- *   Name node is updated with the new information by issuing the
- *   dfsadmin -refreshNodes command.
- *
- * @param nodeList
- *          array list of live or dead nodes.
+ * Remove decommissioned datanodes from the list of live or dead nodes.
+ * This is used to not display a decommissioned datanode to the operators.
+ * @param nodeList array list of live or dead nodes.
*/
- private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
- // If the include list is empty, any nodes are welcomed and it does not
- // make sense to exclude any nodes from the cluster. Therefore, no remove.
- if (!hostFileManager.hasIncludes()) {
- return;
- }
-
- for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
+ private void removeDecomNodeFromList(
+ final List<DatanodeDescriptor> nodeList) {
+ for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
DatanodeDescriptor node = it.next();
- if ((!hostFileManager.isIncluded(node)) && (!hostFileManager.isExcluded(node))
- && node.isDecommissioned()) {
- // Include list is not empty, an existing datanode does not appear
- // in both include or exclude lists and it has been decommissioned.
- // Remove it from the node list.
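+ // Remove any node that has completed decommission, regardless of the
+ // include/exclude host lists.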
+ if (node.isDecommissioned()) {
it.remove();
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 75b6be90c33..4a9d13b4948 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5073,7 +5073,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
@Override // FSNamesystemMBean
public int getVolumeFailuresTotal() {
List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
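+ // Pass false so decommissioned datanodes stay in the live list and their
+ // volume failures are still counted.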
+ getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
int volumeFailuresTotal = 0;
for (DatanodeDescriptor node: live) {
volumeFailuresTotal += node.getVolumeFailures();
@@ -5084,7 +5084,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
@Override // FSNamesystemMBean
public long getEstimatedCapacityLostTotal() {
List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+ getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
long estimatedCapacityLostTotal = 0;
for (DatanodeDescriptor node: live) {
VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
@@ -5891,7 +5891,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
final Map<String, Map<String, Object>> info =
new HashMap<String, Map<String, Object>>();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
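+ // Pass false so decommissioned datanodes still appear in the live node listing.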
+ blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
for (DatanodeDescriptor node : live) {
ImmutableMap.Builder<String, Object> innerinfo =
ImmutableMap.<String, Object>builder();
@@ -5939,7 +5939,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
final Map<String, Map<String, Object>> info =
new HashMap<String, Map<String, Object>>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- blockManager.getDatanodeManager().fetchDatanodes(null, dead, true);
+ blockManager.getDatanodeManager().fetchDatanodes(null, dead, false);
for (DatanodeDescriptor node : dead) {
Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
.put("lastContact", getLastContact(node))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 7c30361ee2c..c1fdd2527ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -1128,6 +1128,49 @@ public class TestDecommission {
decomManager.getNumPendingNodes());
}
+ /**
+ * Fetching live DataNodes by passing removeDecommissionNode value as
+ * false - returns the live node list with the node in Decommissioned state
+ * true - returns the live node list without the node in Decommissioned state
+ * @throws IOException
+ */
+ @Test
+ public void testCountOnDecommissionedNodeList() throws IOException {
+ conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
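+ // Short heartbeat and recheck intervals so the decommission state change
+ // is picked up quickly.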
+ try {
+ cluster =
+ new MiniDFSCluster.Builder(conf)
+ .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
+ .numDataNodes(1).build();
+ cluster.waitActive();
+ DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
+ validateCluster(client, 1);
+
+ ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+ new ArrayList<ArrayList<DatanodeInfo>>(1);
+ namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(1));
+
+ // Move datanode1 to Decommissioned state
+ ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
+ decommissionNode(0, null,
+ decommissionedNode, AdminStates.DECOMMISSIONED);
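+ // The datanode is now in DECOMMISSIONED state but is still tracked by
+ // the DatanodeManager.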
+
+ FSNamesystem ns = cluster.getNamesystem(0);
+ DatanodeManager datanodeManager =
+ ns.getBlockManager().getDatanodeManager();
+ List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ // fetchDatanodes with false should return the live decommissioned node
+ datanodeManager.fetchDatanodes(live, null, false);
+ assertTrue(1 == live.size());
+ // fetchDatanodes with true should not return the live decommissioned node
+ datanodeManager.fetchDatanodes(live, null, true);
+ assertTrue(0 == live.size());
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
/**
* Decommissioned node should not be considered while calculating node usage
* @throws InterruptedException