HDFS-8780. Fetching live/dead datanode list with arg true for removeDecommissionNode, returns list with decom node. (Contributed by J.Andreina)
(cherry picked from commit 10ab7d595e)
parent 52cc2ca488
commit 2893da4f53
@@ -1031,6 +1031,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9063. Correctly handle snapshot path for getContentSummary. (jing9)
 
+    HDFS-8780. Fetching live/dead datanode list with arg true for remove-
+    DecommissionNode,returns list with decom node. (J.Andreina via vinayakumab)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -788,44 +788,16 @@ public class DatanodeManager {
   }
 
   /**
-   * Remove an already decommissioned data node who is neither in include nor
-   * exclude hosts lists from the the list of live or dead nodes. This is used
-   * to not display an already decommssioned data node to the operators.
-   * The operation procedure of making a already decommissioned data node not
-   * to be displayed is as following:
-   * <ol>
-   * <li>
-   * Host must have been in the include hosts list and the include hosts list
-   * must not be empty.
-   * </li>
-   * <li>
-   * Host is decommissioned by remaining in the include hosts list and added
-   * into the exclude hosts list. Name node is updated with the new
-   * information by issuing dfsadmin -refreshNodes command.
-   * </li>
-   * <li>
-   * Host is removed from both include hosts and exclude hosts lists. Name
-   * node is updated with the new informationby issuing dfsamin -refreshNodes
-   * command.
-   * <li>
-   * </ol>
-   *
-   * @param nodeList
-   *          , array list of live or dead nodes.
+   * Remove decommissioned datanode from the the list of live or dead nodes.
+   * This is used to not to display a decommissioned datanode to the operators.
+   * @param nodeList , array list of live or dead nodes.
    */
-  private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
-    // If the include list is empty, any nodes are welcomed and it does not
-    // make sense to exclude any nodes from the cluster. Therefore, no remove.
-    if (!hostFileManager.hasIncludes()) {
-      return;
-    }
-
-    for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
+  private void removeDecomNodeFromList(
+      final List<DatanodeDescriptor> nodeList) {
+    Iterator<DatanodeDescriptor> it=null;
+    for (it = nodeList.iterator(); it.hasNext();) {
       DatanodeDescriptor node = it.next();
-      if ((!hostFileManager.isIncluded(node)) && (!hostFileManager.isExcluded(node))
-          && node.isDecommissioned()) {
-        // Include list is not empty, an existing datanode does not appear
-        // in both include or exclude lists and it has been decommissioned.
+      if (node.isDecommissioned()) {
         it.remove();
       }
     }
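The patched removeDecomNodeFromList() above now drops every decommissioned descriptor from the supplied list, regardless of the include/exclude host files. Below is a minimal standalone sketch of the same iterator-removal pattern; DummyNode and the main method are illustrative stand-ins for the HDFS types, not part of the patch:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class DecomFilterSketch {
  // Illustrative stand-in for DatanodeDescriptor#isDecommissioned().
  static class DummyNode {
    final String name;
    final boolean decommissioned;
    DummyNode(String name, boolean decommissioned) {
      this.name = name;
      this.decommissioned = decommissioned;
    }
  }

  // Same iterator-removal pattern as the patched removeDecomNodeFromList().
  static void removeDecommissioned(List<DummyNode> nodes) {
    for (Iterator<DummyNode> it = nodes.iterator(); it.hasNext();) {
      if (it.next().decommissioned) {
        it.remove();
      }
    }
  }

  public static void main(String[] args) {
    List<DummyNode> nodes = new ArrayList<DummyNode>();
    nodes.add(new DummyNode("dn1", false));
    nodes.add(new DummyNode("dn2", true));
    removeDecommissioned(nodes);
    System.out.println(nodes.size()); // prints 1: the decommissioned node is gone
  }
}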
@@ -5068,7 +5068,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @Override // FSNamesystemMBean
   public int getVolumeFailuresTotal() {
     List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
     int volumeFailuresTotal = 0;
     for (DatanodeDescriptor node: live) {
       volumeFailuresTotal += node.getVolumeFailures();
@@ -5079,7 +5079,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @Override // FSNamesystemMBean
   public long getEstimatedCapacityLostTotal() {
     List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
     long estimatedCapacityLostTotal = 0;
     for (DatanodeDescriptor node: live) {
       VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
@@ -5886,7 +5886,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final Map<String, Map<String,Object>> info =
         new HashMap<String, Map<String,Object>>();
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
+    blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
     for (DatanodeDescriptor node : live) {
       ImmutableMap.Builder<String, Object> innerinfo =
           ImmutableMap.<String,Object>builder();
@@ -5934,7 +5934,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final Map<String, Map<String, Object>> info =
         new HashMap<String, Map<String, Object>>();
     final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    blockManager.getDatanodeManager().fetchDatanodes(null, dead, true);
+    blockManager.getDatanodeManager().fetchDatanodes(null, dead, false);
     for (DatanodeDescriptor node : dead) {
       Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
           .put("lastContact", getLastContact(node))
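All four call sites above are JMX/metrics paths (volume failures, estimated lost capacity, live and dead node info) that should keep counting datanodes even after they are decommissioned, so they now pass false for the removeDecommissionNode argument of DatanodeManager#fetchDatanodes. A hedged sketch of the calling convention follows; the helper class and method names are illustrative, only the fetchDatanodes(live, dead, removeDecommissionNode) call itself comes from the patch:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

class FetchDatanodesSketch {
  // false: decommissioned nodes stay in the list (what the metrics above want).
  static int countLiveIncludingDecommissioned(DatanodeManager dm) {
    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    dm.fetchDatanodes(live, null, false);
    return live.size();
  }

  // true: decommissioned nodes are filtered out (what operator-facing listings want).
  static int countLiveExcludingDecommissioned(DatanodeManager dm) {
    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    dm.fetchDatanodes(live, null, true);
    return live.size();
  }
}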
@@ -1154,6 +1154,49 @@ public class TestDecommission {
         decomManager.getNumPendingNodes());
   }
 
+  /**
+   * Fetching Live DataNodes by passing removeDecommissionedNode value as
+   * false- returns LiveNodeList with Node in Decommissioned state
+   * true - returns LiveNodeList without Node in Decommissioned state
+   * @throws InterruptedException
+   */
+  @Test
+  public void testCountOnDecommissionedNodeList() throws IOException{
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+    try {
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
+              .numDataNodes(1).build();
+      cluster.waitActive();
+      DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
+      validateCluster(client, 1);
+
+      ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+          new ArrayList<ArrayList<DatanodeInfo>>(1);
+      namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(1));
+
+      // Move datanode1 to Decommissioned state
+      ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
+      decommissionNode(0, null,
+          decommissionedNode, AdminStates.DECOMMISSIONED);
+
+      FSNamesystem ns = cluster.getNamesystem(0);
+      DatanodeManager datanodeManager =
+          ns.getBlockManager().getDatanodeManager();
+      List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+      // fetchDatanode with false should return livedecommisioned node
+      datanodeManager.fetchDatanodes(live, null, false);
+      assertTrue(1==live.size());
+      // fetchDatanode with true should not return livedecommisioned node
+      datanodeManager.fetchDatanodes(live, null, true);
+      assertTrue(0==live.size());
+    }finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Decommissioned node should not be considered while calculating node usage
    * @throws InterruptedException