HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fail due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.

(cherry picked from commit 91d6fe151f)
Author: Lei Xu
Date:   2017-05-30 11:09:03 -07:00
parent f72f194e24
commit a3ad1a39c5
1 changed file with 32 additions and 2 deletions

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java

@@ -98,6 +98,7 @@ public class TestDataNodeHotSwapVolumes {
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
   private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @After
   public void tearDown() {
@@ -112,7 +113,7 @@ public class TestDataNodeHotSwapVolumes {
   private void startDFSCluster(int numNameNodes, int numDataNodes,
       int storagePerDataNode) throws IOException {
     shutdown();
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 
     /*
@@ -758,7 +759,7 @@ public class TestDataNodeHotSwapVolumes {
     }
   }
 
-  @Test(timeout=180000)
+  @Test(timeout=600000)
   public void testRemoveVolumeBeingWritten()
       throws InterruptedException, TimeoutException, ReconfigurationException,
       IOException, BrokenBarrierException {
@@ -849,6 +850,9 @@ public class TestDataNodeHotSwapVolumes {
           1, fsVolumeReferences.size());
     }
 
+    // Add a new DataNode to help with the pipeline recover.
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
@@ -858,6 +862,32 @@ public class TestDataNodeHotSwapVolumes {
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
+
+    // Write more files to make sure that the DataNode that has removed volume
+    // is still alive to receive data.
+    for (int i = 0; i < 10; i++) {
+      final Path file = new Path("/after-" + i);
+      try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+        rb.nextBytes(writeBuf);
+        fout.write(writeBuf);
+      }
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+      FsVolumeSpi volume = fsVolumeReferences.get(0);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+      int blockCount = 0;
+      while (!blkIter.atEnd()) {
+        blkIter.nextBlock();
+        blockCount++;
+      }
+      assertTrue(String.format("DataNode(%d) should have more than 1 blocks",
+          dataNodeIdx), blockCount > 1);
+    }
   }
 
   @Test(timeout=60000)
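Editor's sketch of the recovery scenario end to end (not from the commit; it assumes the hadoop-hdfs test artifacts for MiniDFSCluster, and the path /pipeline-test and buffer sizes are invented for illustration): start three DataNodes, open a replication-3 write, then bring up a spare node the way the fix does so pipeline recovery has somewhere to go.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class PipelineRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          // A replication-3 write over exactly three DataNodes: losing one
          // node mid-write leaves pipeline recovery with no substitute node.
          try (FSDataOutputStream out =
                   fs.create(new Path("/pipeline-test"), (short) 3)) {
            out.write(new byte[512]);
            out.hflush();
            // The commit's remedy: one more DataNode, so a replacement
            // exists if a pipeline node drops out (5-argument overload).
            cluster.startDataNodes(conf, 1, true, null, null);
            out.write(new byte[512]);
          }
        } finally {
          cluster.shutdown();
        }
      }
    }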