diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ac037f42d61..aa477544932 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -281,7 +281,10 @@ Release 2.8.0 - UNRELEASED configured zero. (Surendra Singh Lilhore via Arpit Agarwal) HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart. - (Surendra Singh Lilhore via Arpit Agarwal) + (Surendra Singh Lilhore via Arpit Agarwal) + + HDFS-8309. Skip unit test using DataNodeTestUtils#injectDataDirFailure() on Windows. + (xyao) Release 2.7.1 - UNRELEASED diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index 668084b84cb..315529caee1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -78,6 +78,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doAnswer; @@ -705,6 +706,10 @@ public class TestDataNodeHotSwapVolumes { public void testDirectlyReloadAfterCheckDiskError() throws IOException, TimeoutException, InterruptedException, ReconfigurationException { + // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate + // volume failures which is currently not supported on Windows.
+ assumeTrue(!Path.WINDOWS); + startDFSCluster(1, 2); createFile(new Path("/test"), 32, (short)2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index 0a90947d4df..0d158c9a6a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -207,8 +207,12 @@ public class TestDataNodeVolumeFailure { * after failure. */ @Test(timeout=150000) - public void testFailedVolumeBeingRemovedFromDataNode() + public void testFailedVolumeBeingRemovedFromDataNode() throws InterruptedException, IOException, TimeoutException { + // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate + // volume failures which is currently not supported on Windows. + assumeTrue(!Path.WINDOWS); + Path file1 = new Path("/test1"); DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L); DFSTestUtil.waitReplication(fs, file1, (short) 2); @@ -270,9 +274,8 @@ public class TestDataNodeVolumeFailure { */ @Test public void testUnderReplicationAfterVolFailure() throws Exception { - // This test relies on denying access to data volumes to simulate data volume - // failure. This doesn't work on Windows, because an owner of an object - // always has the ability to read and change permissions on the object. + // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate + // volume failures which is currently not supported on Windows.
assumeTrue(!Path.WINDOWS); // Bring up one more datanode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java index aac288aedd7..e0728dc2c4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java @@ -76,9 +76,8 @@ public class TestDataNodeVolumeFailureReporting { @Before public void setUp() throws Exception { - // These tests simulate volume failures by denying execute permission on the - // volume's path. On Windows, the owner of an object is always allowed - // access, so we can't run these tests on Windows. + // These tests use DataNodeTestUtils#injectDataDirFailure() to simulate + // volume failures which is currently not supported on Windows. assumeTrue(!Path.WINDOWS); // Allow a single volume failure (there are two volumes) initCluster(1, 2, 1);