From 209fd3f9fc65dde0446763ada5301a55a4dbf815 Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Fri, 8 Jul 2011 00:16:07 +0000
Subject: [PATCH] HDFS-2111. Add tests for ensuring that the DN will start
 with a few bad data directories. Contributed by Harsh J Chouraria.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1144100 13f79535-47bb-0310-9956-ffa450edef68
---
 hdfs/CHANGES.txt                             |  3 ++
 .../TestDataNodeVolumeFailureToleration.java | 49 ++++++++++++++++++-
 2 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/hdfs/CHANGES.txt b/hdfs/CHANGES.txt
index 32ea9ec01a3..6054c7a452d 100644
--- a/hdfs/CHANGES.txt
+++ b/hdfs/CHANGES.txt
@@ -540,6 +540,9 @@ Trunk (unreleased changes)
     HDFS-2109. Store uMask as member variable to DFSClient.Conf.
     (Bharath Mundlapudi via szetszwo)
 
+    HDFS-2111. Add tests for ensuring that the DN will start with a few bad
+    data directories. (Harsh J Chouraria via todd)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index d569e2e086b..d5f65c256e1 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -93,6 +92,54 @@ public class TestDataNodeVolumeFailureToleration {
     cluster.shutdown();
   }
 
+  /**
+   * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
+   * option, i.e. the DN tolerates a failed-to-use scenario during
+   * its start-up.
+   */
+  @Test
+  public void testValidVolumesAtStartup() throws Exception {
+    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
+
+    // Make sure no DNs are running.
+    cluster.shutdownDataNodes();
+
+    // Bring up a datanode with two default data dirs, but with one bad one.
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
+
+    // We use subdirectories 1 and 2 in order to have only a single
+    // data dir's parent inject a failure.
+    File tld = new File(MiniDFSCluster.getBaseDirectory(), "badData");
+    File dataDir1 = new File(tld, "data1");
+    File dataDir1Actual = new File(dataDir1, "1");
+    dataDir1Actual.mkdirs();
+    // Force an IOE to occur on one of the dfs.data.dir.
+    File dataDir2 = new File(tld, "data2");
+    prepareDirToFail(dataDir2);
+    File dataDir2Actual = new File(dataDir2, "2");
+
+    // Start one DN, with manually managed DN dir
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
+    cluster.startDataNodes(conf, 1, false, null, null);
+    cluster.waitActive();
+
+    try {
+      assertTrue("The DN should have started up fine.",
+          cluster.isDataNodeUp());
+      DataNode dn = cluster.getDataNodes().get(0);
+      String si = dn.getFSDataset().getStorageInfo();
+      assertTrue("The DN should have started with this directory",
+          si.contains(dataDir1Actual.getPath()));
+      assertFalse("The DN shouldn't have a bad directory.",
+          si.contains(dataDir2Actual.getPath()));
+    } finally {
+      cluster.shutdownDataNodes();
+      FileUtil.chmod(dataDir2.toString(), "755");
+    }
+
+  }
+
   /**
    * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
    * option, i.e. the DN shuts itself down when the number of failures
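
Note: the new test calls a prepareDirToFail() helper that lives elsewhere in
TestDataNodeVolumeFailureToleration and is not part of this hunk. Judging from
the finally-block cleanup (FileUtil.chmod(dataDir2.toString(), "755")) and the
assumeTrue guard that skips the test on Windows, the helper presumably makes
the directory unusable by revoking its permissions. A minimal sketch under
that assumption (not necessarily the upstream implementation):

  /**
   * Make a local directory unusable as a dfs.data.dir: create it, then
   * revoke all permissions so the DN gets an IOException when it tries
   * to create its storage directory underneath it.
   */
  private void prepareDirToFail(File dir) throws IOException,
      InterruptedException {
    assertTrue("Couldn't create dir " + dir, dir.mkdirs());
    // chmod 000 is the assumed failure-injection mechanism; the test's
    // finally block restores 755 so the directory can be cleaned up later.
    assertEquals("Couldn't chmod local vol " + dir, 0,
        FileUtil.chmod(dir.toString(), "000"));
  }

Permission-based failure injection of this kind would also explain the Windows
guard: POSIX permission bits aren't enforced the same way there, so the "bad"
volume would not actually fail at start-up.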