diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index c03b02b0ad6..158efd687a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -646,8 +646,6 @@ public class TestDataNodeHotSwapVolumes {
     final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
     final FileSystem fs = cluster.getFileSystem();
     final Path testFile = new Path("/test");
-    final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
-
     FSDataOutputStream out = fs.create(testFile, REPLICATION);

     Random rb = new Random(0);
@@ -703,17 +701,24 @@ public class TestDataNodeHotSwapVolumes {
     reconfigThread.join();

+    // Verify if the data directory reconfigure was successful
+    FsDatasetSpi fsDatasetSpi = dn.getFSDataset();
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      for (int i =0; i < fsVolumeReferences.size(); i++) {
+        System.out.println("Vol: " +
+            fsVolumeReferences.get(i).getBaseURI().toString());
+      }
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+    }
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
     byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
     assertEquals(BLOCK_SIZE, content.length);

-    // If an IOException thrown from BlockReceiver#run, it triggers
-    // DataNode#checkDiskError(). So we can test whether checkDiskError() is called,
-    // to see whether there is IOException in BlockReceiver#run().
-    assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
-
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }