From 969e84decbc976bd98f1050aead695d15a024ab6 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 12 Feb 2013 00:50:00 +0000
Subject: [PATCH] HDFS-4342. Directories configured in
 dfs.namenode.edits.dir.required but not in dfs.namenode.edits.dir are
 silently ignored. Contributed by Arpit Agarwal

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1445006 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  4 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java |  1 +
 .../hdfs/server/namenode/FSNamesystem.java    | 92 ++++++++++++-------
 .../hadoop/hdfs/server/namenode/NameNode.java | 28 +++++-
 .../server/namenode/TestNameEditsConfigs.java | 82 +++++++++++++++++
 5 files changed, 168 insertions(+), 39 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 79872e1557f..5cf92d3232b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -317,6 +317,10 @@ Release 2.0.4-beta - UNRELEASED
     HDFS-4471. Namenode WebUI file browsing does not work with wildcard
     addresses configured. (Andrew Wang via atm)
 
+    HDFS-4342. Directories configured in dfs.namenode.edits.dir.required
+    but not in dfs.namenode.edits.dir are silently ignored. (Arpit Agarwal
+    via szetszwo)
+
 Release 2.0.3-alpha - 2013-02-06
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 24cdba7e1bf..b1a4f283f5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -235,6 +235,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
   public static final String DFS_NAMENODE_EDITS_PLUGIN_PREFIX = "dfs.namenode.edits.journal-plugin";
   public static final String DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY = "dfs.namenode.edits.dir.required";
+  public static final String DFS_NAMENODE_EDITS_DIR_DEFAULT = "file:///tmp/hadoop/dfs/name";
   public static final String DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size";
   public static final String DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
   public static final String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2a45afa1747..b626612ec2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -127,6 +127,7 @@
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -422,11 +423,61 @@ LeaseManager getLeaseManager() {
   }
 
   /**
+   * Check the supplied configuration for correctness.
+   * @param conf Supplies the configuration to validate.
+   * @throws IOException if the configuration could not be queried.
+   * @throws IllegalArgumentException if the configuration is invalid.
+   */
+  private static void checkConfiguration(Configuration conf)
+      throws IOException {
+
+    final Collection<URI> namespaceDirs =
+        FSNamesystem.getNamespaceDirs(conf);
+    final Collection<URI> editsDirs =
+        FSNamesystem.getNamespaceEditsDirs(conf);
+    final Collection<URI> requiredEditsDirs =
+        FSNamesystem.getRequiredNamespaceEditsDirs(conf);
+    final Collection<URI> sharedEditsDirs =
+        FSNamesystem.getSharedEditsDirs(conf);
+
+    for (URI u : requiredEditsDirs) {
+      if (u.toString().compareTo(
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT) == 0) {
+        continue;
+      }
+
+      // Each required directory must also be in editsDirs or in
+      // sharedEditsDirs.
+      if (!editsDirs.contains(u) &&
+          !sharedEditsDirs.contains(u)) {
+        throw new IllegalArgumentException(
+            "Required edits directory " + u.toString() + " not present in " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + ". " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + "=" +
+            editsDirs.toString() + "; " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY + "=" +
+            requiredEditsDirs.toString() + ". " +
+            DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY + "=" +
+            sharedEditsDirs.toString() + ".");
+      }
+    }
+
+    if (namespaceDirs.size() == 1) {
+      LOG.warn("Only one image storage directory ("
+          + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of dataloss"
+          + " due to lack of redundant storage directories!");
+    }
+    if (editsDirs.size() == 1) {
+      LOG.warn("Only one namespace edits storage directory ("
+          + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of dataloss"
+          + " due to lack of redundant storage directories!");
+    }
+  }
+
+  /**
    * Instantiates an FSNamesystem loaded from the image and edits
    * directories specified in the passed Configuration.
-   * 
+   *
    * @param conf the Configuration which specifies the storage directories
    * from which to load
   * @return an FSNamesystem which contains the loaded namespace
   */
@@ -434,39 +485,11 @@
  public static FSNamesystem loadFromDisk(Configuration conf)
      throws IOException {
-    Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
-    List<URI> namespaceEditsDirs =
-        FSNamesystem.getNamespaceEditsDirs(conf);
-    return loadFromDisk(conf, namespaceDirs, namespaceEditsDirs);
-  }
-
-  /**
-   * Instantiates an FSNamesystem loaded from the image and edits
-   * directories passed.
-   *
-   * @param conf the Configuration which specifies the storage directories
-   * from which to load
-   * @param namespaceDirs directories to load the fsimages
-   * @param namespaceEditsDirs directories to load the edits from
-   * @return an FSNamesystem which contains the loaded namespace
-   * @throws IOException if loading fails
-   */
-  public static FSNamesystem loadFromDisk(Configuration conf,
-      Collection<URI> namespaceDirs, List<URI> namespaceEditsDirs)
-      throws IOException {
-
-    if (namespaceDirs.size() == 1) {
-      LOG.warn("Only one image storage directory ("
-          + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of dataloss"
-          + " due to lack of redundant storage directories!");
-    }
-    if (namespaceEditsDirs.size() == 1) {
-      LOG.warn("Only one namespace edits storage directory ("
-          + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of dataloss"
-          + " due to lack of redundant storage directories!");
-    }
-
-    FSImage fsImage = new FSImage(conf, namespaceDirs, namespaceEditsDirs);
+    checkConfiguration(conf);
+    FSImage fsImage = new FSImage(conf,
+        FSNamesystem.getNamespaceDirs(conf),
+        FSNamesystem.getNamespaceEditsDirs(conf));
     FSNamesystem namesystem = new FSNamesystem(conf, fsImage);
     StartupOption startOpt = NameNode.getStartupOption(conf);
     if (startOpt == StartupOption.RECOVER) {
@@ -913,7 +936,8 @@ private static Collection<URI> getStorageDirs(Configuration conf,
         "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
         "of the file system meta-data.");
     } else if (dirNames.isEmpty()) {
-      dirNames = Collections.singletonList("file:///tmp/hadoop/dfs/name");
+      dirNames = Collections.singletonList(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
     }
     return Util.stringCollectionAsURIs(dirNames);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 99f804d1bc5..f3c95fa9feb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -78,6 +78,7 @@
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
@@ -780,6 +781,26 @@ public static boolean initializeSharedEdits(Configuration conf,
     return initializeSharedEdits(conf, force, false);
   }
 
+  /**
+   * Clone the supplied configuration but remove the shared edits dirs.
+   *
+   * @param conf Supplies the original configuration.
+   * @return Cloned configuration without the shared edit dirs.
+   * @throws IOException on failure to generate the configuration.
+   */
+  private static Configuration getConfigurationWithoutSharedEdits(
+      Configuration conf)
+      throws IOException {
+    List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
+    String editsDirsString = Joiner.on(",").join(editsDirs);
+
+    Configuration confWithoutShared = new Configuration(conf);
+    confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
+    confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        editsDirsString);
+    return confWithoutShared;
+  }
+
   /**
    * Format a new shared edits dir and copy in enough edit log segments so that
    * the standby NN can start up.
@@ -809,11 +830,8 @@ private static boolean initializeSharedEdits(Configuration conf,
     NNStorage existingStorage = null;
     try {
-      Configuration confWithoutShared = new Configuration(conf);
-      confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
-      FSNamesystem fsns = FSNamesystem.loadFromDisk(confWithoutShared,
-          FSNamesystem.getNamespaceDirs(conf),
-          FSNamesystem.getNamespaceEditsDirs(conf, false));
+      FSNamesystem fsns =
+          FSNamesystem.loadFromDisk(getConfigurationWithoutSharedEdits(conf));
 
       existingStorage = fsns.getFSImage().getStorage();
       NamespaceInfo nsInfo = existingStorage.getNamespaceInfo();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
index 63388be3207..b2368a1458f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
@@ -308,6 +308,88 @@ private FSImageTransactionalStorageInspector inspect(File storageDir)
         new File(storageDir, "current"), NameNodeDirType.IMAGE_AND_EDITS);
   }
 
+  /**
+   * Test edits.dir.required configuration options.
+   * 1. Directory present in dfs.namenode.edits.dir.required but not in
+   *    dfs.namenode.edits.dir. Expected to fail.
+   * 2. Directory present in both dfs.namenode.edits.dir.required and
+   *    dfs.namenode.edits.dir. Expected to succeed.
+   * 3. Directory present only in dfs.namenode.edits.dir. Expected to
+   *    succeed.
+   */
+  @Test
+  public void testNameEditsRequiredConfigs() throws IOException {
+    MiniDFSCluster cluster = null;
+    File nameAndEditsDir = new File(base_dir, "name_and_edits");
+    File nameAndEditsDir2 = new File(base_dir, "name_and_edits2");
+
+    // 1
+    // Bad configuration. Add a directory to dfs.namenode.edits.dir.required
+    // without adding it to dfs.namenode.edits.dir.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          nameAndEditsDir2.toURI().toString());
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+      fail("Successfully started cluster but should not have been able to.");
+    } catch (IllegalArgumentException iae) { // expect to fail
+      LOG.info("EXPECTED: cluster start failed due to bad configuration" + iae);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      cluster = null;
+    }
+
+    // 2
+    // Good configuration. Add a directory to both dfs.namenode.edits.dir.required
+    // and dfs.namenode.edits.dir.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setStrings(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString(),
+          nameAndEditsDir2.toURI().toString());
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          nameAndEditsDir2.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+    // 3
+    // Good configuration. Adds a directory to dfs.namenode.edits.dir but not to
+    // dfs.namenode.edits.dir.required.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setStrings(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString(),
+          nameAndEditsDir2.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * This test tries to simulate failure scenarios.