HDFS-11399. Many tests fails in Windows due to injecting disk failures. Contributed by Yiqun Lin.
(cherry picked from commit ac627f561f)

parent 41a3f378bc
commit 2bda1ffe72
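The patch guards each affected test with PlatformAssumptions.assumeNotWindows(), so on Windows the tests are reported as skipped instead of failed. As a rough, hypothetical sketch of how such a JUnit assumption guard can be built (this is not the actual org.apache.hadoop.test.PlatformAssumptions source; the class name and OS check below are illustrative):

// Illustrative sketch only (assumed names, not the actual Hadoop source):
// a platform guard built on JUnit 4's Assume API. A failed assumption
// marks the calling test as skipped rather than failed.
import org.junit.Assume;

public final class PlatformGuardSketch {
  private static final boolean WINDOWS =
      System.getProperty("os.name").toLowerCase().startsWith("windows");

  private PlatformGuardSketch() {
  }

  /** Skip the calling test when it runs on Windows. */
  public static void assumeNotWindows() {
    Assume.assumeTrue("Test not supported on Windows", !WINDOWS);
  }
}

With this pattern, a test that calls the guard as its first statement is ignored on Windows while running normally everywhere else, which is exactly how the hunks below use assumeNotWindows().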
TestBlockStatsMXBean.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -160,6 +161,10 @@ public class TestBlockStatsMXBean {
 
   @Test
   public void testStorageTypeStatsWhenStorageFailed() throws Exception {
+    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+    // volume failures which is currently not supported on Windows.
+    assumeNotWindows();
+
     DFSTestUtil.createFile(cluster.getFileSystem(),
         new Path("/blockStatsFile1"), 1024, (short) 1, 0L);
     Map<StorageType, StorageTypeStats> storageTypeStatsMap = cluster
TestDataNodeVolumeFailure.java

@@ -293,6 +293,10 @@ public class TestDataNodeVolumeFailure {
   @Test(timeout=10000)
   public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
       throws Exception {
+    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+    // volume failures which is currently not supported on Windows.
+    assumeNotWindows();
+
     // make both data directories to fail on dn0
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
@@ -314,6 +318,10 @@ public class TestDataNodeVolumeFailure {
   @Test
   public void testVolumeFailureRecoveredByHotSwappingVolume()
       throws Exception {
+    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+    // volume failures which is currently not supported on Windows.
+    assumeNotWindows();
+
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
     final DataNode dn0 = cluster.getDataNodes().get(0);
@@ -354,6 +362,10 @@ public class TestDataNodeVolumeFailure {
   @Test
   public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
       throws Exception {
+    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+    // volume failures which is currently not supported on Windows.
+    assumeNotWindows();
+
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
     final File dn0VolNew = new File(dataDir, "data_new");
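For context on why the skip is needed: every guarded test relies on DataNodeTestUtils#injectDataDirFailure() to make a DataNode volume unusable. The diff does not show that helper, so the sketch below is only a hedged illustration of this style of failure injection (the permission-flip mechanism is an assumption, not the real helper's implementation); POSIX-style permission semantics like these do not carry over to Windows, which is why the tests are skipped there.

// Hedged illustration (assumed mechanism, not DataNodeTestUtils itself):
// simulate a failed volume by revoking access to its data directory.
// Permission flips of this kind behave differently on Windows, so tests
// built on the technique are skipped on that platform.
import java.io.File;

final class DataDirFailureSketch {
  /** Make the directory look like a failed volume to the DataNode. */
  static void injectFailure(File dataDir) {
    dataDir.setWritable(false);
    dataDir.setReadable(false);
    dataDir.setExecutable(false);
  }

  /** Undo the simulated failure so the directory is usable again. */
  static void restoreFailure(File dataDir) {
    dataDir.setExecutable(true);
    dataDir.setReadable(true);
    dataDir.setWritable(true);
  }
}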