HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fail due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.
parent 71c34c7155
commit 611d452bcf
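In short: when the volume removal kills a replica mid-write, the cluster has no spare DataNode for the client's write-pipeline recovery, so the test can time out waiting for full replication. The patch promotes the test's Configuration to an instance field (so a later startDataNodes call reuses it), starts one extra DataNode before verifying replication, raises the test timeout from 180 to 600 seconds, and adds a follow-up check that the DataNode whose volume was removed still accepts new blocks.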
@@ -97,6 +97,7 @@ public class TestDataNodeHotSwapVolumes {
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
   private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @After
   public void tearDown() {
@@ -111,7 +112,7 @@ public class TestDataNodeHotSwapVolumes {
   private void startDFSCluster(int numNameNodes, int numDataNodes,
       int storagePerDataNode) throws IOException {
     shutdown();
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 
     /*
@@ -756,7 +757,7 @@ public class TestDataNodeHotSwapVolumes {
     }
   }
 
-  @Test(timeout=180000)
+  @Test(timeout=600000)
   public void testRemoveVolumeBeingWritten()
       throws InterruptedException, TimeoutException, ReconfigurationException,
       IOException, BrokenBarrierException {
@@ -848,6 +849,9 @@ public class TestDataNodeHotSwapVolumes {
           1, fsVolumeReferences.size());
     }
 
+    // Add a new DataNode to help with the pipeline recovery.
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
@@ -857,6 +861,32 @@ public class TestDataNodeHotSwapVolumes {
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
 
+    // Write more files to make sure that the DataNode that has the removed
+    // volume is still alive to receive data.
+    for (int i = 0; i < 10; i++) {
+      final Path file = new Path("/after-" + i);
+      try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+        rb.nextBytes(writeBuf);
+        fout.write(writeBuf);
+      }
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+      FsVolumeSpi volume = fsVolumeReferences.get(0);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+      int blockCount = 0;
+      while (!blkIter.atEnd()) {
+        blkIter.nextBlock();
+        blockCount++;
+      }
+      assertTrue(String.format("DataNode(%d) should have more than 1 block",
+          dataNodeIdx), blockCount > 1);
+    }
   }
 
   @Test(timeout=60000)
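For context, here is a minimal sketch of the pattern the fix relies on: a write pipeline that loses a replica can only be rebuilt if a healthy DataNode is available to take its place. This is not the committed test; the class name, file path, replication factor, and buffer size are illustrative, and only MiniDFSCluster calls that appear in the patch (or standard FileSystem/DFSTestUtil APIs) are used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class PipelineRecoverySketch {
  public static void main(String[] args) throws Exception {
    final short replication = 3;
    Configuration conf = new Configuration();
    // Start with exactly as many DataNodes as the replication factor,
    // so losing one leaves no spare for pipeline recovery.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // ... a volume on one DataNode is removed while a write is in flight ...

      // The fix's core idea: add one more DataNode so the client can rebuild
      // the write pipeline with a healthy replacement node.
      cluster.startDataNodes(conf, 1, true, null, null, null);

      Path file = new Path("/pipeline-recovery-sketch");
      try (FSDataOutputStream out = fs.create(file, replication)) {
        out.write(new byte[512]);
      }
      // With the extra node available, replication can reach the target
      // instead of stalling until the test times out.
      DFSTestUtil.waitReplication(fs, file, replication);
    } finally {
      cluster.shutdown();
    }
  }
}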