diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5e6d4e90703..d1b5a6bb27a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -110,6 +110,9 @@ Trunk (unreleased changes)
 
     HDFS-2404. webhdfs liststatus json response is not correct. (suresh)
 
+    HDFS-2422. The NN should tolerate the same number of low-resource volumes
+    as failed volumes (atm)
+
 Release 0.23.0 - Unreleased
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
index 24f999e1708..4d7cfd8fa92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.Util;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  *
  * NameNodeResourceChecker provides a method -
@@ -91,15 +93,16 @@ public class NameNodeResourceChecker {
   }
 
   /**
-   * Return true if disk space is available on all all the configured volumes.
+   * Return true if disk space is available on at least one of the configured
+   * volumes.
    *
-   * @return True if the configured amount of disk space is available on all
-   *         volumes, false otherwise.
+   * @return True if the configured amount of disk space is available on at
+   *         least one volume, false otherwise.
    * @throws IOException
    */
   boolean hasAvailableDiskSpace()
       throws IOException {
-    return getVolumesLowOnSpace().size() == 0;
+    return getVolumesLowOnSpace().size() < volumes.size();
   }
 
   /**
@@ -127,4 +130,9 @@ public class NameNodeResourceChecker {
     }
     return lowVolumes;
   }
+
+  @VisibleForTesting
+  void setVolumes(Map<String, DF> volumes) {
+    this.volumes = volumes;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
index 60410a220b3..15cb4805a5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
@@ -19,9 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
@@ -29,6 +32,8 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import com.google.common.collect.Lists;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -41,7 +46,7 @@ public class TestNameNodeResourceChecker {
   @Before
   public void setUp () throws IOException {
     conf = new Configuration();
-    baseDir = new File(conf.get("hadoop.tmp.dir"));
+    baseDir = new File(System.getProperty("test.build.data"));
     nameDir = new File(baseDir, "resource-check-name-dir");
     nameDir.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
@@ -50,8 +55,6 @@ public class TestNameNodeResourceChecker {
   /**
    * Tests that hasAvailableDiskSpace returns true if disk usage is below
    * threshold.
-   *
-   * @throws IOException in case of errors
    */
   @Test
   public void testCheckAvailability()
@@ -67,8 +70,6 @@ public class TestNameNodeResourceChecker {
   /**
    * Tests that hasAvailableDiskSpace returns false if disk usage is above
    * threshold.
-   *
-   * @throws IOException in case of errors
   */
   @Test
   public void testCheckAvailabilityNeg() throws IOException {
@@ -83,9 +84,6 @@ public class TestNameNodeResourceChecker {
   /**
    * Tests that NameNode resource monitor causes the NN to enter safe mode when
    * resources are low.
-   *
-   * @throws IOException in case of errors
-   * @throws InterruptedException
    */
   @Test
   public void testCheckThatNameNodeResourceMonitorIsRunning()
@@ -139,14 +137,12 @@ public class TestNameNodeResourceChecker {
   /**
    * Tests that only a single space check is performed if two name dirs are
    * supplied which are on the same volume.
-   *
-   * @throws IOException
    */
   @Test
   public void testChecking2NameDirsOnOneVolume() throws IOException {
     Configuration conf = new Configuration();
-    File nameDir1 = new File(conf.get("hadoop.tmp.dir", "name-dir1"));
-    File nameDir2 = new File(conf.get("hadoop.tmp.dir", "name-dir2"));
+    File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
+    File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
     nameDir1.mkdirs();
     nameDir2.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
@@ -162,13 +158,11 @@ public class TestNameNodeResourceChecker {
   /**
    * Tests that only a single space check is performed if extra volumes are
    * configured manually which also coincide with a volume the name dir is on.
-   *
-   * @throws IOException
   */
   @Test
   public void testCheckingExtraVolumes() throws IOException {
     Configuration conf = new Configuration();
-    File nameDir = new File(conf.get("hadoop.tmp.dir", "name-dir"));
+    File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
     nameDir.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
@@ -179,4 +173,41 @@ public class TestNameNodeResourceChecker {
     assertEquals("Should not check the same volume more than once.", 1,
         nb.getVolumesLowOnSpace().size());
   }
+
+  /**
+   * Test that the NN is considered to be out of resources only once all
+   * configured volumes are low on resources.
+   */
+  @Test
+  public void testLowResourceVolumePolicy() throws IOException {
+    Configuration conf = new Configuration();
+    File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
+    File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+    nameDir1.mkdirs();
+    nameDir2.mkdirs();
+
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
+
+    NameNodeResourceChecker nnrc = new NameNodeResourceChecker(conf);
+
+    // For the purpose of this test, we need to force the name dirs to appear to
+    // be on different volumes.
+    Map<String, DF> volumes = new HashMap<String, DF>();
+    volumes.put("volume1", new DF(nameDir1, conf));
+    volumes.put("volume2", new DF(nameDir2, conf));
+    nnrc.setVolumes(volumes);
+
+    NameNodeResourceChecker spyNnrc = Mockito.spy(nnrc);
+
+    Mockito.when(spyNnrc.getVolumesLowOnSpace()).thenReturn(
+        Lists.newArrayList("volume1"));
+
+    assertTrue(spyNnrc.hasAvailableDiskSpace());
+
+    Mockito.when(spyNnrc.getVolumesLowOnSpace()).thenReturn(
+        Lists.newArrayList("volume1", "volume2"));
+
+    assertFalse(spyNnrc.hasAvailableDiskSpace());
+  }
 }