HDFS-8056. Decommissioned dead nodes should continue to be counted as dead after NN restart. (mingma)

This commit is contained in:
Ming Ma 2015-11-19 10:04:01 -08:00
parent ac1aa6c819
commit 1c4951a7a0
4 changed files with 43 additions and 2 deletions

View File

@@ -1657,6 +1657,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9252. Change TestFileTruncate to use FsDatasetTestUtils to get block HDFS-9252. Change TestFileTruncate to use FsDatasetTestUtils to get block
file size and genstamp. (Lei (Eddy) Xu via cmccabe) file size and genstamp. (Lei (Eddy) Xu via cmccabe)
HDFS-8056. Decommissioned dead nodes should continue to be counted as dead
after NN restart. (mingma)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@@ -1272,7 +1272,7 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
if (listDeadNodes) { if (listDeadNodes) {
for (InetSocketAddress addr : includedNodes) { for (InetSocketAddress addr : includedNodes) {
if (foundNodes.matchedBy(addr) || excludedNodes.match(addr)) { if (foundNodes.matchedBy(addr)) {
continue; continue;
} }
// The remaining nodes are ones that are referenced by the hosts // The remaining nodes are ones that are referenced by the hosts
@@ -1289,6 +1289,9 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
addr.getPort() == 0 ? defaultXferPort : addr.getPort(), addr.getPort() == 0 ? defaultXferPort : addr.getPort(),
defaultInfoPort, defaultInfoSecurePort, defaultIpcPort)); defaultInfoPort, defaultInfoSecurePort, defaultIpcPort));
setDatanodeDead(dn); setDatanodeDead(dn);
if (excludedNodes.match(addr)) {
dn.setDecommissioned();
}
nodes.add(dn); nodes.add(dn);
} }
} }

View File

@@ -923,6 +923,41 @@ public void testDecommissionWithNamenodeRestart()throws IOException, Interrupted
cluster.shutdown(); cluster.shutdown();
} }
/**
 * Tests dead node count after restart of namenode.
 *
 * Regression test for HDFS-8056: a datanode that has been decommissioned
 * and has then died must still be reported as DEAD (rather than dropped
 * from the datanode report) after the namenode restarts.
 **/
@Test(timeout=360000)
public void testDeadNodeCountAfterNamenodeRestart()throws Exception {
LOG.info("Starting test testDeadNodeCountAfterNamenodeRestart");
int numNamenodes = 1;
int numDatanodes = 2;
// Bring up a single-namenode cluster with two live datanodes.
startCluster(numNamenodes, numDatanodes, conf);
DFSClient client = getDfsClient(cluster.getNameNode(), conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
DatanodeInfo excludedDatanode = info[0];
String excludedDatanodeName = info[0].getXferAddr();
// List both datanodes in the hosts file so the namenode continues to
// track them (including the one we kill) across its restart.
writeConfigFile(hostsFile, new ArrayList<String>(Arrays.asList(
excludedDatanodeName, info[1].getXferAddr())));
// Decommission the first datanode and block until it reaches the
// DECOMMISSIONED admin state.
decommissionNode(0, excludedDatanode.getDatanodeUuid(), null,
AdminStates.DECOMMISSIONED);
// Stop the decommissioned datanode and wait (up to 20s) for the
// namenode to mark it dead.
cluster.stopDataNode(excludedDatanodeName);
DFSTestUtil.waitForDatanodeState(
cluster, excludedDatanode.getDatanodeUuid(), false, 20000);
//Restart the namenode
cluster.restartNameNode();
// After restart, the surviving node is LIVE and the dead decommissioned
// node must still be counted as DEAD — the behavior fixed by HDFS-8056.
assertEquals("There should be one node alive", 1,
client.datanodeReport(DatanodeReportType.LIVE).length);
assertEquals("There should be one node dead", 1,
client.datanodeReport(DatanodeReportType.DEAD).length);
cluster.shutdown();
}
/** /**
* Test using a "registration name" in a host include file. * Test using a "registration name" in a host include file.
* *

View File

@@ -151,7 +151,7 @@ public void testIncludeExcludeLists() throws IOException {
Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size()); .DatanodeReportType.DEAD).size());
excludedNodes.add(entry("127.0.0.3")); excludedNodes.add(entry("127.0.0.3"));
Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size()); .DatanodeReportType.DEAD).size());
} }
} }