HDFS-2422. The NN should tolerate the same number of low-resource volumes as failed volumes (atm)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1181316 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Aaron Myers 2011-10-11 00:37:29 +00:00
parent 42e93829e5
commit f527f989af
3 changed files with 61 additions and 19 deletions

View File

@ -110,6 +110,9 @@ Trunk (unreleased changes)
HDFS-2404. webhdfs liststatus json response is not correct. (suresh) HDFS-2404. webhdfs liststatus json response is not correct. (suresh)
HDFS-2422. The NN should tolerate the same number of low-resource volumes
as failed volumes (atm)
Release 0.23.0 - Unreleased Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -32,6 +32,8 @@ import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.common.Util;
import com.google.common.annotations.VisibleForTesting;
/** /**
* *
* NameNodeResourceChecker provides a method - * NameNodeResourceChecker provides a method -
@ -91,15 +93,16 @@ public class NameNodeResourceChecker {
} }
/** /**
* Return true if disk space is available on all all the configured volumes. * Return true if disk space is available on at least one of the configured
* volumes.
* *
* @return True if the configured amount of disk space is available on all * @return True if the configured amount of disk space is available on at
* volumes, false otherwise. * least one volume, false otherwise.
* @throws IOException * @throws IOException
*/ */
boolean hasAvailableDiskSpace() boolean hasAvailableDiskSpace()
throws IOException { throws IOException {
return getVolumesLowOnSpace().size() == 0; return getVolumesLowOnSpace().size() < volumes.size();
} }
/** /**
@ -127,4 +130,9 @@ public class NameNodeResourceChecker {
} }
return lowVolumes; return lowVolumes;
} }
@VisibleForTesting
/**
 * Replaces the map of volumes (mount point -> DF) that this checker inspects.
 * Test-only hook (see {@code @VisibleForTesting}): lets a test force name dirs
 * to appear to be on distinct volumes regardless of the actual filesystem layout.
 *
 * @param volumes the new volume map; stored by reference, no defensive copy
 */
void setVolumes(Map<String, DF> volumes) {
this.volumes = volumes;
}
} }

View File

@ -19,9 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set; import java.util.Set;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
@ -29,6 +32,8 @@ import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.mockito.Mockito; import org.mockito.Mockito;
import com.google.common.collect.Lists;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
@ -41,7 +46,7 @@ public class TestNameNodeResourceChecker {
@Before @Before
public void setUp () throws IOException { public void setUp () throws IOException {
conf = new Configuration(); conf = new Configuration();
baseDir = new File(conf.get("hadoop.tmp.dir")); baseDir = new File(System.getProperty("test.build.data"));
nameDir = new File(baseDir, "resource-check-name-dir"); nameDir = new File(baseDir, "resource-check-name-dir");
nameDir.mkdirs(); nameDir.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
@ -50,8 +55,6 @@ public class TestNameNodeResourceChecker {
/** /**
* Tests that hasAvailableDiskSpace returns true if disk usage is below * Tests that hasAvailableDiskSpace returns true if disk usage is below
* threshold. * threshold.
*
* @throws IOException in case of errors
*/ */
@Test @Test
public void testCheckAvailability() public void testCheckAvailability()
@ -67,8 +70,6 @@ public class TestNameNodeResourceChecker {
/** /**
* Tests that hasAvailableDiskSpace returns false if disk usage is above * Tests that hasAvailableDiskSpace returns false if disk usage is above
* threshold. * threshold.
*
* @throws IOException in case of errors
*/ */
@Test @Test
public void testCheckAvailabilityNeg() throws IOException { public void testCheckAvailabilityNeg() throws IOException {
@ -83,9 +84,6 @@ public class TestNameNodeResourceChecker {
/** /**
* Tests that NameNode resource monitor causes the NN to enter safe mode when * Tests that NameNode resource monitor causes the NN to enter safe mode when
* resources are low. * resources are low.
*
* @throws IOException in case of errors
* @throws InterruptedException
*/ */
@Test @Test
public void testCheckThatNameNodeResourceMonitorIsRunning() public void testCheckThatNameNodeResourceMonitorIsRunning()
@ -139,14 +137,12 @@ public class TestNameNodeResourceChecker {
/** /**
* Tests that only a single space check is performed if two name dirs are * Tests that only a single space check is performed if two name dirs are
* supplied which are on the same volume. * supplied which are on the same volume.
*
* @throws IOException
*/ */
@Test @Test
public void testChecking2NameDirsOnOneVolume() throws IOException { public void testChecking2NameDirsOnOneVolume() throws IOException {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
File nameDir1 = new File(conf.get("hadoop.tmp.dir", "name-dir1")); File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
File nameDir2 = new File(conf.get("hadoop.tmp.dir", "name-dir2")); File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
nameDir1.mkdirs(); nameDir1.mkdirs();
nameDir2.mkdirs(); nameDir2.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
@ -162,13 +158,11 @@ public class TestNameNodeResourceChecker {
/** /**
* Tests that only a single space check is performed if extra volumes are * Tests that only a single space check is performed if extra volumes are
* configured manually which also coincide with a volume the name dir is on. * configured manually which also coincide with a volume the name dir is on.
*
* @throws IOException
*/ */
@Test @Test
public void testCheckingExtraVolumes() throws IOException { public void testCheckingExtraVolumes() throws IOException {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
File nameDir = new File(conf.get("hadoop.tmp.dir", "name-dir")); File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
nameDir.mkdirs(); nameDir.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
@ -179,4 +173,41 @@ public class TestNameNodeResourceChecker {
assertEquals("Should not check the same volume more than once.", assertEquals("Should not check the same volume more than once.",
1, nb.getVolumesLowOnSpace().size()); 1, nb.getVolumesLowOnSpace().size());
} }
/**
 * Verifies the low-resource policy: the NN reports available disk space as
 * long as at least one configured volume is healthy, and only reports
 * exhaustion once every volume is low on space.
 */
@Test
public void testLowResourceVolumePolicy() throws IOException {
Configuration conf = new Configuration();
File dirA = new File(System.getProperty("test.build.data"), "name-dir1");
File dirB = new File(System.getProperty("test.build.data"), "name-dir2");
dirA.mkdirs();
dirB.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
dirA.getAbsolutePath() + "," + dirB.getAbsolutePath());
NameNodeResourceChecker checker = new NameNodeResourceChecker(conf);
// Both dirs likely live on the same real filesystem, so inject a volume
// map that makes them look like two distinct volumes.
Map<String, DF> volumeMap = new HashMap<String, DF>();
volumeMap.put("volume1", new DF(dirA, conf));
volumeMap.put("volume2", new DF(dirB, conf));
checker.setVolumes(volumeMap);
NameNodeResourceChecker spy = Mockito.spy(checker);
// One of two volumes low -> still considered to have space available.
Mockito.when(spy.getVolumesLowOnSpace()).thenReturn(
Lists.newArrayList("volume1"));
assertTrue(spy.hasAvailableDiskSpace());
// All volumes low -> out of resources.
Mockito.when(spy.getVolumesLowOnSpace()).thenReturn(
Lists.newArrayList("volume1", "volume2"));
assertFalse(spy.hasAvailableDiskSpace());
}
} }