HDFS-4279. NameNode does not initialize generic conf keys when started with -recover. Contributed by Colin Patrick McCabe.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1418560 13f79535-47bb-0310-9956-ffa450edef68
parent 3ff00d4aaa
commit 2e12de45a4
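The change addresses a gap in the NameNode's metadata recovery path: in federated and HA deployments, per-NameNode settings live under suffixed keys such as dfs.namenode.name.dir.ns1.nn1, and the -recover startup path never promoted them to their generic names, so recovery could not locate the storage directories. Below is a minimal sketch (not part of the commit) of the mechanism the fix invokes; it assumes the branch-2 APIs that appear in the diff, and the path /data/dfs/name is a made-up example.

    // Minimal sketch, not part of the commit: shows how suffixed keys are
    // promoted to generic keys. Assumes branch-2 APIs seen in the diff below;
    // "/data/dfs/name" is a hypothetical example path.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public class GenericKeysSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // A federated/HA config keeps per-NameNode settings under suffixed keys.
        conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
        conf.set(DFSUtil.addKeySuffixes(
            DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
            "/data/dfs/name");
        // Before initialization the generic key is unset: prints null.
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY));
        // initializeGenericKeys copies the suffixed value onto the generic key,
        // which is what doRecovery() now does before reading the name dirs.
        NameNode.initializeGenericKeys(conf, "ns1", "nn1");
        // Prints /data/dfs/name.
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY));
      }
    }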
@@ -306,6 +306,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4236. Remove artificial limit on username length introduced in
     HDFS-4171. (tucu via suresh)
 
+    HDFS-4279. NameNode does not initialize generic conf keys when started
+    with -recover. (Colin Patrick McCabe via atm)
+
 BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@@ -1052,6 +1052,9 @@ public class NameNode {
 
   private static void doRecovery(StartupOption startOpt, Configuration conf)
       throws IOException {
+    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+    String namenodeId = HAUtil.getNameNodeId(conf, nsId);
+    initializeGenericKeys(conf, nsId, namenodeId);
     if (startOpt.getForce() < MetaRecoveryContext.FORCE_ALL) {
       if (!confirmPrompt("You have selected Metadata Recovery mode. " +
           "This mode is intended to recover lost metadata on a corrupt " +
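The fix itself is the three added lines above: derive the nameservice ID and NameNode ID from the configuration, then copy the matching suffixed keys onto their generic names before the recovery prompt, mirroring what normal NameNode startup already does.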
@@ -30,11 +30,16 @@ import java.io.RandomAccessFile;
 import java.util.HashSet;
 import java.util.Set;
 
+import junit.framework.Assert;
+
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -432,6 +437,39 @@ public class TestNameNodeRecovery {
     }
   }
 
+  /**
+   * Create a test configuration that will exercise the initializeGenericKeys
+   * code path.  This is a regression test for HDFS-4279.
+   */
+  static void setupRecoveryTestConf(Configuration conf) throws IOException {
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
+        "ns1"), "nn1,nn2");
+    String baseDir = System.getProperty(
+        MiniDFSCluster.PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
+    File nameDir = new File(baseDir, "nameR");
+    File secondaryDir = new File(baseDir, "namesecondaryR");
+    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
+        DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
+        nameDir.getCanonicalPath());
+    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
+        DFS_NAMENODE_CHECKPOINT_DIR_KEY, "ns1", "nn1"),
+        secondaryDir.getCanonicalPath());
+    conf.unset(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+    conf.unset(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+    FileUtils.deleteQuietly(nameDir);
+    if (!nameDir.mkdirs()) {
+      throw new RuntimeException("failed to make directory " +
+          nameDir.getAbsolutePath());
+    }
+    FileUtils.deleteQuietly(secondaryDir);
+    if (!secondaryDir.mkdirs()) {
+      throw new RuntimeException("failed to make directory " +
+          secondaryDir.getAbsolutePath());
+    }
+  }
+
   static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize)
       throws IOException {
     final String TEST_PATH = "/test/path/dir";
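Note the design of the new setupRecoveryTestConf helper: it deliberately unsets the generic dfs.namenode.name.dir and dfs.namenode.checkpoint.dir keys after configuring only their ns1/nn1-suffixed variants, so the recovery path can find its storage directories only if initializeGenericKeys runs; without the NameNode.java change above, this regression test would fail.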
@@ -440,12 +478,13 @@ public class TestNameNodeRecovery {
 
     // start a cluster
     Configuration conf = new HdfsConfiguration();
+    setupRecoveryTestConf(conf);
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
     StorageDirectory sd = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-          .enableManagedDfsDirsRedundancy(false).build();
+          .manageNameDfsDirs(false).build();
       cluster.waitActive();
       if (!finalize) {
         // Normally, the in-progress edit log would be finalized by
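The accompanying builder change from enableManagedDfsDirsRedundancy(false) to manageNameDfsDirs(false) fits the same pattern: the test now creates and owns its name and checkpoint directories via setupRecoveryTestConf, so the MiniDFSCluster is told not to manage the name directories itself and to use the directories already set in the configuration.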