From a3ad1a39c5b8f2eb438475ba69a8d6552fd42661 Mon Sep 17 00:00:00 2001
From: Lei Xu
Date: Tue, 30 May 2017 11:09:03 -0700
Subject: [PATCH] HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fails due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.

(cherry picked from commit 91d6fe151f2e3de21b0a9423ade921e771957d90)
---
 .../datanode/TestDataNodeHotSwapVolumes.java  | 34 +++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 7edf5ca2ab7..b97f5c84af1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -98,6 +98,7 @@ public class TestDataNodeHotSwapVolumes {
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
   private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @After
   public void tearDown() {
@@ -112,7 +113,7 @@ public class TestDataNodeHotSwapVolumes {
   private void startDFSCluster(int numNameNodes, int numDataNodes,
       int storagePerDataNode) throws IOException {
     shutdown();
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 
     /*
@@ -758,7 +759,7 @@ public class TestDataNodeHotSwapVolumes {
     }
   }
 
-  @Test(timeout=180000)
+  @Test(timeout=600000)
   public void testRemoveVolumeBeingWritten()
       throws InterruptedException, TimeoutException, ReconfigurationException,
       IOException, BrokenBarrierException {
@@ -849,6 +850,9 @@ public class TestDataNodeHotSwapVolumes {
           1, fsVolumeReferences.size());
     }
 
+    // Add a new DataNode to help with pipeline recovery.
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
@@ -858,6 +862,32 @@ public class TestDataNodeHotSwapVolumes {
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
+
+    // Write more files to make sure that the DataNode that had a volume
+    // removed is still alive to receive data.
+    for (int i = 0; i < 10; i++) {
+      final Path file = new Path("/after-" + i);
+      try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+        rb.nextBytes(writeBuf);
+        fout.write(writeBuf);
+      }
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      assertEquals("Volume removal wasn't successful.",
+          1, fsVolumeReferences.size());
+      FsVolumeSpi volume = fsVolumeReferences.get(0);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+      int blockCount = 0;
+      while (!blkIter.atEnd()) {
+        blkIter.nextBlock();
+        blockCount++;
+      }
+      assertTrue(String.format("DataNode(%d) should have more than 1 block",
+          dataNodeIdx), blockCount > 1);
+    }
   }
 
   @Test(timeout=60000)