HDFS-10960. TestDataNodeHotSwapVolumes#testRemoveVolumeBeingWritten fails at disk error verification after volume remove. (Manoj Govindassamy via lei)

Lei Xu 2016-10-14 13:41:59 -07:00
parent adb96e109f
commit 8c520a27cb
1 changed file with 12 additions and 7 deletions


@@ -642,8 +642,6 @@ public class TestDataNodeHotSwapVolumes {
final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
final FileSystem fs = cluster.getFileSystem();
final Path testFile = new Path("/test");
final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
FSDataOutputStream out = fs.create(testFile, REPLICATION);
Random rb = new Random(0);
@@ -699,17 +697,24 @@ public class TestDataNodeHotSwapVolumes {
reconfigThread.join();
// Verify that the data directory reconfiguration was successful
FsDatasetSpi<? extends FsVolumeSpi> fsDatasetSpi = dn.getFSDataset();
try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
.getFsVolumeReferences()) {
for (int i = 0; i < fsVolumeReferences.size(); i++) {
System.out.println("Vol: " +
fsVolumeReferences.get(i).getBaseURI().toString());
}
assertEquals("Volume remove wasn't successful.",
1, fsVolumeReferences.size());
}
// Verify the file has sufficient replication.
DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
// Read the content back
byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
assertEquals(BLOCK_SIZE, content.length);
// If an IOException is thrown from BlockReceiver#run, it triggers
// DataNode#checkDiskError(). So we can check whether checkDiskError() was
// called to see whether an IOException occurred in BlockReceiver#run().
assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
if (!exceptions.isEmpty()) {
throw new IOException(exceptions.get(0).getCause());
}
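
For reference, a minimal sketch of the post-reconfiguration verification this commit adds, pulled out of the test body into a standalone helper. The class name, helper name, and its parameters are illustrative only; the FsDatasetSpi and DFSTestUtil calls mirror the added lines in the diff above.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

import static org.junit.Assert.assertEquals;

public class VolumeRemovalCheckSketch {
  /**
   * Verify that the volume removal took effect on the DataNode, then confirm
   * the file written during the hot swap is still fully replicated and
   * readable. Helper name and parameters are hypothetical.
   */
  static void verifyVolumeRemoval(DataNode dn, FileSystem fs, Path testFile,
      int expectedVolumes, short replication, int expectedLength)
      throws Exception {
    FsDatasetSpi<? extends FsVolumeSpi> dataset = dn.getFSDataset();
    // FsVolumeReferences is closeable; try-with-resources releases the
    // volume references once the count has been checked.
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dataset.getFsVolumeReferences()) {
      for (int i = 0; i < volumes.size(); i++) {
        System.out.println("Vol: " + volumes.get(i).getBaseURI());
      }
      assertEquals("Volume remove wasn't successful.",
          expectedVolumes, volumes.size());
    }
    // The file being written should still reach the requested replication
    // and read back with the expected length after the volume is gone.
    DFSTestUtil.waitReplication(fs, testFile, replication);
    byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
    assertEquals(expectedLength, content.length);
  }
}

This is the replacement for the removed getLastDiskErrorCheck() comparison: instead of inferring the absence of disk errors from an unchanged check timestamp, the test asserts directly that the volume count dropped and that the in-flight file is still healthy.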