From 611d452bcf217c680ac7169848311c06d9ec0a23 Mon Sep 17 00:00:00 2001
From: Lei Xu
Date: Tue, 30 May 2017 11:09:03 -0700
Subject: [PATCH] HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fails due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.

---
 .../datanode/TestDataNodeHotSwapVolumes.java  | 34 +++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 2d87614b066..9d140a1f793 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -97,6 +97,7 @@ public class TestDataNodeHotSwapVolumes {
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
   private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @After
   public void tearDown() {
@@ -111,7 +112,7 @@ public class TestDataNodeHotSwapVolumes {
   private void startDFSCluster(int numNameNodes, int numDataNodes,
       int storagePerDataNode) throws IOException {
     shutdown();
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 
     /*
@@ -756,7 +757,7 @@ public class TestDataNodeHotSwapVolumes {
     }
   }
 
-  @Test(timeout=180000)
+  @Test(timeout=600000)
   public void testRemoveVolumeBeingWritten()
       throws InterruptedException, TimeoutException, ReconfigurationException,
       IOException, BrokenBarrierException {
@@ -848,6 +849,9 @@ public class TestDataNodeHotSwapVolumes {
           1, fsVolumeReferences.size());
     }
 
+    // Add a new DataNode to help with the pipeline recovery.
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
@@ -857,6 +861,32 @@ public class TestDataNodeHotSwapVolumes {
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
    }
+
+    // Write more files to make sure that the DataNode with the removed
+    // volume is still alive to receive data.
+    for (int i = 0; i < 10; i++) {
+      final Path file = new Path("/after-" + i);
+      try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+        rb.nextBytes(writeBuf);
+        fout.write(writeBuf);
+      }
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+      FsVolumeSpi volume = fsVolumeReferences.get(0);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+      int blockCount = 0;
+      while (!blkIter.atEnd()) {
+        blkIter.nextBlock();
+        blockCount++;
+      }
+      assertTrue(String.format("DataNode(%d) should have more than 1 block",
+          dataNodeIdx), blockCount > 1);
+    }
   }
 
   @Test(timeout=60000)
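
Note, not part of the patch: a minimal sketch of the failure mode this change works around. The test runs with as many DataNodes as the replication factor, so every DataNode sits in the write pipeline; when testRemoveVolumeBeingWritten hot-swaps a volume out mid-write, the client's pipeline recovery has no idle DataNode to substitute in, and the write can stall until the test times out. The class name PipelineRecoverySketch, the three-node sizing, and the 512-byte block size below are illustrative assumptions; the MiniDFSCluster calls are the same ones the test itself uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class PipelineRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);

    // Three DataNodes with replication 3: every DataNode is a pipeline
    // member, so none is free to serve as a replacement during recovery.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .build();
    try {
      cluster.waitActive();

      // Removing a volume from one DataNode while a block is being written
      // forces the client to rebuild its pipeline with a replacement node.
      // Starting an extra DataNode first, as the patch does, gives that
      // recovery somewhere to go.
      cluster.startDataNodes(conf, 1, true, null, null, null);
    } finally {
      cluster.shutdown();
    }
  }
}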