From 535fd7de153d5a6f28a0f59bf6433ac8b90a562a Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Fri, 29 Jun 2012 22:23:53 +0000
Subject: [PATCH] HDFS-3446. HostsFileReader silently ignores bad includes/excludes. Contributed by Matthew Jacobs.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1355583 13f79535-47bb-0310-9956-ffa450edef68
---
 .../apache/hadoop/util/HostsFileReader.java   |  3 --
 .../hadoop/util/TestHostsFileReader.java      | 41 +++++++++++++++++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../apache/hadoop/hdfs/TestDecommission.java  |  3 +-
 .../TestBlocksWithNotEnoughRacks.java         |  6 +++
 .../namenode/NNThroughputBenchmark.java       |  6 ++-
 .../namenode/TestDecommissioningStatus.java   |  6 ++-
 .../hdfs/server/namenode/TestStartup.java     |  6 +--
 8 files changed, 61 insertions(+), 13 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index 1b6abf4832b..818bffe2f1b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -50,9 +50,6 @@ public HostsFileReader(String inFile,
   private void readFileToSet(String filename, Set<String> set)
       throws IOException {
     File file = new File(filename);
-    if (!file.exists()) {
-      return;
-    }
     FileInputStream fis = new FileInputStream(file);
     BufferedReader reader = null;
     try {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
index 9fbd02cfd75..7de0be859f7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.util;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.FileWriter;
-import java.util.Set;
 
 import org.junit.*;
 import static org.junit.Assert.*;
@@ -37,8 +37,6 @@ public class TestHostsFileReader {
   File INCLUDES_FILE = new File(HOSTS_TEST_DIR, "dfs.include");
   String excludesFile = HOSTS_TEST_DIR + "/dfs.exclude";
   String includesFile = HOSTS_TEST_DIR + "/dfs.include";
-  private Set<String> includes;
-  private Set<String> excludes;
 
   @Before
   public void setUp() throws Exception {
@@ -100,6 +98,43 @@ public void testHostsFileReader() throws Exception {
 
   }
 
+  /*
+   * Test creating a new HostsFileReader with nonexistent files
+   */
+  @Test
+  public void testCreateHostFileReaderWithNonexistentFile() throws Exception {
+    try {
+      new HostsFileReader(
+          HOSTS_TEST_DIR + "/doesnt-exist",
+          HOSTS_TEST_DIR + "/doesnt-exist");
+      Assert.fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException ex) {
+      // Exception as expected
+    }
+  }
+
+  /*
+   * Test refreshing an existing HostsFileReader with an includes file that no longer exists
+   */
+  @Test
+  public void testRefreshHostFileReaderWithNonexistentFile() throws Exception {
+    FileWriter efw = new FileWriter(excludesFile);
+    FileWriter ifw = new FileWriter(includesFile);
+
+    efw.close();
+
+    ifw.close();
+
+    HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
+    assertTrue(INCLUDES_FILE.delete());
+    try {
+      hfp.refresh();
+      Assert.fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException ex) {
+      // Exception as expected
+    }
+  }
+
   /*
    * Test for null file
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2f8f3dc0018..12844dfdd8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -4,6 +4,9 @@ Release 2.0.1-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+    HDFS-3446. HostsFileReader silently ignores bad includes/excludes
+    (Matthew Jacobs via todd)
+
   NEW FEATURES
 
     HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 483d3579cda..09456237cce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -78,6 +78,7 @@ public void setup() throws IOException {
     // Setup conf
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
         false);
+    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
@@ -85,6 +86,7 @@ public void setup() throws IOException {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
         NAMENODE_REPLICATION_INTERVAL);
 
+    writeConfigFile(hostsFile, null);
     writeConfigFile(excludeFile, null);
   }
@@ -508,7 +510,6 @@ public void testHostsFileFederation() throws IOException, InterruptedException {
 
   public void testHostsFile(int numNameNodes) throws IOException,
       InterruptedException {
-    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     int numDatanodes = 1;
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 3161124e9cb..1ee4b250b20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -389,9 +389,12 @@ public void testNodeDecomissionRespectsRackPolicy() throws Exception {
     Path workingDir = localFileSys.getWorkingDirectory();
     Path dir = new Path(workingDir, "build/test/data/temp/decommission");
     Path excludeFile = new Path(dir, "exclude");
+    Path includeFile = new Path(dir, "include");
     assertTrue(localFileSys.mkdirs(dir));
     DFSTestUtil.writeFile(localFileSys, excludeFile, "");
+    DFSTestUtil.writeFile(localFileSys, includeFile, "");
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
 
     // Two blocks and four racks
     String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
@@ -439,8 +442,11 @@ public void testNodeDecomissionWithOverreplicationRespectsRackPolicy()
     Path workingDir = localFileSys.getWorkingDirectory();
     Path dir = new Path(workingDir, "build/test/data/temp/decommission");
     Path excludeFile = new Path(dir, "exclude");
+    Path includeFile = new Path(dir, "include");
     assertTrue(localFileSys.mkdirs(dir));
     DFSTestUtil.writeFile(localFileSys, excludeFile, "");
+    DFSTestUtil.writeFile(localFileSys, includeFile, "");
+    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
 
     // All hosts are on two racks, only one host on /rack2
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 7644e054cbe..497334fc123 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -119,11 +119,15 @@ public class NNThroughputBenchmark {
         "${hadoop.tmp.dir}/dfs/hosts/exclude");
     File excludeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE,
         "exclude"));
-    if(! excludeFile.exists()) {
+    if(!excludeFile.exists()) {
       if(!excludeFile.getParentFile().mkdirs())
         throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
     }
     new FileOutputStream(excludeFile).close();
+    // set include file
+    config.set(DFSConfigKeys.DFS_HOSTS, "${hadoop.tmp.dir}/dfs/hosts/include");
+    File includeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS, "include"));
+    new FileOutputStream(includeFile).close();
     // Start the NameNode
     String[] argv = new String[] {};
     nameNode = NameNode.createNameNode(argv, config);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 463cd952fba..0138070cd25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -74,14 +74,16 @@ public static void setUp() throws Exception {
     assertTrue(localFileSys.mkdirs(dir));
     excludeFile = new Path(dir, "exclude");
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
-    conf
-        .setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
+    Path includeFile = new Path(dir, "include");
+    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
 
     writeConfigFile(localFileSys, excludeFile, null);
+    writeConfigFile(localFileSys, includeFile, null);
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
     cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index c265579427b..e098a22a52e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -507,7 +507,7 @@ public void testNNRestart() throws IOException, InterruptedException {
     // Setup conf
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
-    writeConfigFile(localFileSys, excludeFile, null);
+    writeConfigFile(localFileSys, excludeFile, null);
     conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
 
     // write into hosts file
     ArrayList<String> list = new ArrayList<String>();
@@ -553,15 +553,15 @@ private void writeConfigFile(FileSystem localFileSys, Path name,
       localFileSys.delete(name, true);
     }
 
+    FSDataOutputStream stm = localFileSys.create(name);
     if (nodes != null) {
-      FSDataOutputStream stm = localFileSys.create(name);
       for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
         String node = it.next();
         stm.writeBytes(node);
         stm.writeBytes("\n");
       }
-      stm.close();
     }
+    stm.close();
   }
 
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
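
Usage note (illustrative, not part of the committed diff): after this change, HostsFileReader's constructor and refresh() propagate FileNotFoundException instead of silently treating a missing include/exclude file as an empty host list, which is why every test above now creates empty files before configuring dfs.hosts / dfs.hosts.exclude. A minimal sketch of that calling pattern follows; the class name and file paths are hypothetical, while the HostsFileReader constructor, getHosts(), and getExcludedHosts() are the existing org.apache.hadoop.util API:

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.util.HostsFileReader;

    public class HostsFileReaderUsageSketch {
      public static void main(String[] args) throws IOException {
        // Hypothetical locations; any writable directory works.
        File include = new File("/tmp/dfs.include");
        File exclude = new File("/tmp/dfs.exclude");

        // After HDFS-3446 a missing file means FileNotFoundException
        // rather than an empty host set, so create the files up front.
        include.createNewFile();
        exclude.createNewFile();

        HostsFileReader reader =
            new HostsFileReader(include.getPath(), exclude.getPath());
        System.out.println("included hosts: " + reader.getHosts());
        System.out.println("excluded hosts: " + reader.getExcludedHosts());
      }
    }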