From 2e12de45a42e6281a8caefda7179c33f475638af Mon Sep 17 00:00:00 2001
From: Aaron Myers
Date: Fri, 7 Dec 2012 23:54:31 +0000
Subject: [PATCH] HDFS-4279. NameNode does not initialize generic conf keys
 when started with -recover. Contributed by Colin Patrick McCabe.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1418560 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 ++
 .../hadoop/hdfs/server/namenode/NameNode.java      |  3 ++
 .../server/namenode/TestNameNodeRecovery.java      | 41 ++++++++++++++++++-
 3 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 967f4684755..6428218109a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -306,6 +306,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4236. Remove artificial limit on username length introduced in
     HDFS-4171. (tucu via suresh)
 
+    HDFS-4279. NameNode does not initialize generic conf keys when started
+    with -recover. (Colin Patrick McCabe via atm)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index fc85428023d..d578c0e7dd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1052,6 +1052,9 @@ public class NameNode {
 
   private static void doRecovery(StartupOption startOpt, Configuration conf)
       throws IOException {
+    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+    String namenodeId = HAUtil.getNameNodeId(conf, nsId);
+    initializeGenericKeys(conf, nsId, namenodeId);
     if (startOpt.getForce() < MetaRecoveryContext.FORCE_ALL) {
       if (!confirmPrompt("You have selected Metadata Recovery mode. " +
           "This mode is intended to recover lost metadata on a corrupt " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 8379cb2332b..5f18ee4236b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -30,11 +30,16 @@ import java.io.RandomAccessFile;
 import java.util.HashSet;
 import java.util.Set;
 
+import junit.framework.Assert;
+
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -432,6 +437,39 @@ public class TestNameNodeRecovery {
     }
   }
 
+  /**
+   * Create a test configuration that will exercise the initializeGenericKeys
+   * code path. This is a regression test for HDFS-4279.
+   */
+  static void setupRecoveryTestConf(Configuration conf) throws IOException {
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
+        "ns1"), "nn1,nn2");
+    String baseDir = System.getProperty(
+        MiniDFSCluster.PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
+    File nameDir = new File(baseDir, "nameR");
+    File secondaryDir = new File(baseDir, "namesecondaryR");
+    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
+        DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
+        nameDir.getCanonicalPath());
+    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
+        DFS_NAMENODE_CHECKPOINT_DIR_KEY, "ns1", "nn1"),
+        secondaryDir.getCanonicalPath());
+    conf.unset(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+    conf.unset(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+    FileUtils.deleteQuietly(nameDir);
+    if (!nameDir.mkdirs()) {
+      throw new RuntimeException("failed to make directory " +
+          nameDir.getAbsolutePath());
+    }
+    FileUtils.deleteQuietly(secondaryDir);
+    if (!secondaryDir.mkdirs()) {
+      throw new RuntimeException("failed to make directory " +
+          secondaryDir.getAbsolutePath());
+    }
+  }
+
   static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize)
       throws IOException {
     final String TEST_PATH = "/test/path/dir";
@@ -440,12 +478,13 @@ public class TestNameNodeRecovery {
 
     // start a cluster
     Configuration conf = new HdfsConfiguration();
+    setupRecoveryTestConf(conf);
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
     StorageDirectory sd = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-        .enableManagedDfsDirsRedundancy(false).build();
+        .manageNameDfsDirs(false).build();
       cluster.waitActive();
       if (!finalize) {
         // Normally, the in-progress edit log would be finalized by