diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 158efd687a5..c03b02b0ad6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -646,6 +646,8 @@ public class TestDataNodeHotSwapVolumes {
     final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
     final FileSystem fs = cluster.getFileSystem();
     final Path testFile = new Path("/test");
+    final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
+
     FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
     Random rb = new Random(0);
@@ -701,24 +703,17 @@ public class TestDataNodeHotSwapVolumes {
     reconfigThread.join();
 
-    // Verify if the data directory reconfigure was successful
-    FsDatasetSpi fsDatasetSpi = dn.getFSDataset();
-    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
-        .getFsVolumeReferences()) {
-      for (int i =0; i < fsVolumeReferences.size(); i++) {
-        System.out.println("Vol: " +
-            fsVolumeReferences.get(i).getBaseURI().toString());
-      }
-      assertEquals("Volume remove wasn't successful.",
-          1, fsVolumeReferences.size());
-    }
-
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
     byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
     assertEquals(BLOCK_SIZE, content.length);
 
+    // If an IOException is thrown from BlockReceiver#run, it triggers
+    // DataNode#checkDiskError(). So we can test whether checkDiskError() is
+    // called, to see whether there was an IOException in BlockReceiver#run().
+    assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
+
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
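
The added assertion relies on the fact that an IOException in BlockReceiver#run() is funneled into DataNode#checkDiskError(), and that the DataNode exposes via getLastDiskErrorCheck() when that check last ran. Capturing the value before the write and asserting it is unchanged afterwards therefore implies the error path was never entered. Below is a minimal, self-contained sketch of that pattern; DiskErrorChecker and its methods are illustrative stand-ins, not Hadoop's actual DataNode implementation.

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class LastErrorCheckPatternSketch {

  /** Illustrative stand-in for a component such as DataNode. */
  static class DiskErrorChecker {
    private volatile long lastDiskErrorCheck = 0;

    /** Called by worker threads when they observe an I/O error. */
    void checkDiskError() {
      lastDiskErrorCheck = System.nanoTime();
    }

    long getLastDiskErrorCheck() {
      return lastDiskErrorCheck;
    }
  }

  @Test
  public void errorPathNotTakenWhenNoIOExceptionOccurs() {
    DiskErrorChecker checker = new DiskErrorChecker();

    // Capture the marker before running the operation under test.
    long before = checker.getLastDiskErrorCheck();

    // ... run the operation that must not raise an IOException ...

    // If checkDiskError() had been invoked, the timestamp would have moved.
    assertEquals(before, checker.getLastDiskErrorCheck());
  }
}

Comparing a timestamp rather than intercepting the exception keeps the test free of mocks and works across threads, at the cost of assuming nothing else triggers the disk-error check during the observed window.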