From 8d1fbf786b8288710a2eb0fe7ad7c9d852017e94 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Fri, 14 Oct 2016 16:37:51 -0500
Subject: [PATCH] Revert "HDFS-10960. TestDataNodeHotSwapVolumes#testRemoveVolumeBeingWritten fails at disk error verification after volume remove. (Manoj Govindassamy via lei)"

This reverts commit f981dd1bca1006f34f55361ba0d72e5d0a621753.
---
 .../datanode/TestDataNodeHotSwapVolumes.java | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 158efd687a5..c03b02b0ad6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -646,6 +646,8 @@ private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
     final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
     final FileSystem fs = cluster.getFileSystem();
     final Path testFile = new Path("/test");
+    final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
+
     FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
     Random rb = new Random(0);
@@ -701,24 +703,17 @@ public void run() {
 
     reconfigThread.join();
 
-    // Verify if the data directory reconfigure was successful
-    FsDatasetSpi<? extends FsVolumeSpi> fsDatasetSpi = dn.getFSDataset();
-    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
-        .getFsVolumeReferences()) {
-      for (int i =0; i < fsVolumeReferences.size(); i++) {
-        System.out.println("Vol: " +
-            fsVolumeReferences.get(i).getBaseURI().toString());
-      }
-      assertEquals("Volume remove wasn't successful.",
-          1, fsVolumeReferences.size());
-    }
-
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
     byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
     assertEquals(BLOCK_SIZE, content.length);
 
+    // If an IOException thrown from BlockReceiver#run, it triggers
+    // DataNode#checkDiskError(). So we can test whether checkDiskError() is called,
+    // to see whether there is IOException in BlockReceiver#run().
+    assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
+
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
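
Note on the restored assertion: per the test's own comment, an IOException escaping BlockReceiver#run() triggers DataNode#checkDiskError(), so comparing the value of getLastDiskErrorCheck() before and after the write tells the test whether any such exception occurred. The standalone sketch below models that timestamp-comparison pattern only; the names ErrorTrackingComponent, checkError, getLastErrorCheck, and doWork are hypothetical and are not the Hadoop API.

// Minimal sketch (assumed, simplified names): an unchanged "last error check"
// timestamp after an operation implies the error path was never taken.
public class DiskErrorCheckSketch {

  static class ErrorTrackingComponent {
    private volatile long lastErrorCheck = 0L;

    // Invoked only when an I/O error is observed, loosely mirroring how an
    // IOException in the write path leads to a disk error check.
    void checkError() {
      lastErrorCheck = System.currentTimeMillis();
    }

    long getLastErrorCheck() {
      return lastErrorCheck;
    }

    // A successful operation never touches the timestamp.
    void doWork(boolean failing) {
      if (failing) {
        checkError();
      }
    }
  }

  public static void main(String[] args) {
    ErrorTrackingComponent component = new ErrorTrackingComponent();

    // Record the timestamp before the operation, as the test does before writing.
    long before = component.getLastErrorCheck();

    // Run an operation that is expected to succeed.
    component.doWork(false);

    // An unchanged timestamp means checkError() was never called, i.e. no error occurred.
    if (before != component.getLastErrorCheck()) {
      throw new AssertionError("checkError() fired; an I/O error must have occurred");
    }
    System.out.println("No error check was triggered; operation completed cleanly.");
  }
}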