svn merge -c 1445006 from trunk for HDFS-4342. Directories configured in dfs.namenode.edits.dir.required but not in dfs.namenode.edits.dir are silently ignored.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1445007 13f79535-47bb-0310-9956-ffa450edef68
parent 1991072520
commit 7ae27355bc
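For context, here is a minimal sketch of the misconfiguration this change targets (the directory paths are hypothetical). Before this patch the required directory was silently ignored; with the checkConfiguration() call added below, NameNode startup via FSNamesystem.loadFromDisk rejects it with an IllegalArgumentException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class RequiredEditsDirExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // A required edits directory that is NOT listed in dfs.namenode.edits.dir
    // (both paths below are hypothetical, for illustration only).
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
        "file:///data/required-edits");
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
        "file:///data/edits");
    // With this patch, starting the NameNode against this configuration fails
    // fast with an IllegalArgumentException instead of silently ignoring the
    // required directory.
  }
}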
CHANGES.txt
@@ -18,6 +18,10 @@ Release 2.0.4-beta - UNRELEASED
     HDFS-4471. Namenode WebUI file browsing does not work with wildcard
     addresses configured. (Andrew Wang via atm)
 
+    HDFS-4342. Directories configured in dfs.namenode.edits.dir.required
+    but not in dfs.namenode.edits.dir are silently ignored. (Arpit Agarwal
+    via szetszwo)
+
 Release 2.0.3-alpha - 2013-02-06
 
   INCOMPATIBLE CHANGES
DFSConfigKeys.java
@@ -235,6 +235,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
   public static final String DFS_NAMENODE_EDITS_PLUGIN_PREFIX = "dfs.namenode.edits.journal-plugin";
   public static final String DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY = "dfs.namenode.edits.dir.required";
+  public static final String DFS_NAMENODE_EDITS_DIR_DEFAULT = "file:///tmp/hadoop/dfs/name";
   public static final String DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size";
   public static final String DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
   public static final String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
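As a rough illustration of how these comma-separated directory keys are consumed (a sketch only, not the HDFS parsing code; FSNamesystem exposes the real accessors such as getNamespaceEditsDirs and getRequiredNamespaceEditsDirs shown further below):

import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class EditsDirKeysExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Both keys take comma-separated lists of directory URIs.
    Collection<String> editsDirs =
        conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
    Collection<String> requiredDirs =
        conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY);
    // The new DFS_NAMENODE_EDITS_DIR_DEFAULT constant replaces the previously
    // hard-coded "file:///tmp/hadoop/dfs/name" fallback (see the FSNamesystem
    // hunk at line 865 below).
    System.out.println("edits dirs: " + editsDirs);
    System.out.println("required edits dirs: " + requiredDirs);
  }
}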
FSNamesystem.java
@@ -127,6 +127,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -376,11 +377,61 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
+  /**
+   * Check the supplied configuration for correctness.
+   * @param conf Supplies the configuration to validate.
+   * @throws IOException if the configuration could not be queried.
+   * @throws IllegalArgumentException if the configuration is invalid.
+   */
+  private static void checkConfiguration(Configuration conf)
+      throws IOException {
+
+    final Collection<URI> namespaceDirs =
+        FSNamesystem.getNamespaceDirs(conf);
+    final Collection<URI> editsDirs =
+        FSNamesystem.getNamespaceEditsDirs(conf);
+    final Collection<URI> requiredEditsDirs =
+        FSNamesystem.getRequiredNamespaceEditsDirs(conf);
+    final Collection<URI> sharedEditsDirs =
+        FSNamesystem.getSharedEditsDirs(conf);
+
+    for (URI u : requiredEditsDirs) {
+      if (u.toString().compareTo(
+              DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT) == 0) {
+        continue;
+      }
+
+      // Each required directory must also be in editsDirs or in
+      // sharedEditsDirs.
+      if (!editsDirs.contains(u) &&
+          !sharedEditsDirs.contains(u)) {
+        throw new IllegalArgumentException(
+            "Required edits directory " + u.toString() + " not present in " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + ". " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + "=" +
+            editsDirs.toString() + "; " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY + "=" +
+            requiredEditsDirs.toString() + ". " +
+            DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY + "=" +
+            sharedEditsDirs.toString() + ".");
+      }
+    }
+
+    if (namespaceDirs.size() == 1) {
+      LOG.warn("Only one image storage directory ("
+          + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of dataloss"
+          + " due to lack of redundant storage directories!");
+    }
+    if (editsDirs.size() == 1) {
+      LOG.warn("Only one namespace edits storage directory ("
+          + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of dataloss"
+          + " due to lack of redundant storage directories!");
+    }
+  }
+
   /**
    * Instantiates an FSNamesystem loaded from the image and edits
    * directories specified in the passed Configuration.
    *
    *
    * @param conf the Configuration which specifies the storage directories
    *             from which to load
    * @return an FSNamesystem which contains the loaded namespace
@@ -388,39 +439,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    */
   public static FSNamesystem loadFromDisk(Configuration conf)
       throws IOException {
-    Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
-    List<URI> namespaceEditsDirs =
-        FSNamesystem.getNamespaceEditsDirs(conf);
-    return loadFromDisk(conf, namespaceDirs, namespaceEditsDirs);
-  }
-
-  /**
-   * Instantiates an FSNamesystem loaded from the image and edits
-   * directories passed.
-   *
-   * @param conf the Configuration which specifies the storage directories
-   *             from which to load
-   * @param namespaceDirs directories to load the fsimages
-   * @param namespaceEditsDirs directories to load the edits from
-   * @return an FSNamesystem which contains the loaded namespace
-   * @throws IOException if loading fails
-   */
-  public static FSNamesystem loadFromDisk(Configuration conf,
-      Collection<URI> namespaceDirs, List<URI> namespaceEditsDirs)
-      throws IOException {
-
-    if (namespaceDirs.size() == 1) {
-      LOG.warn("Only one image storage directory ("
-          + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of dataloss"
-          + " due to lack of redundant storage directories!");
-    }
-    if (namespaceEditsDirs.size() == 1) {
-      LOG.warn("Only one namespace edits storage directory ("
-          + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of dataloss"
-          + " due to lack of redundant storage directories!");
-    }
-
-    FSImage fsImage = new FSImage(conf, namespaceDirs, namespaceEditsDirs);
+
+    checkConfiguration(conf);
+    FSImage fsImage = new FSImage(conf,
+        FSNamesystem.getNamespaceDirs(conf),
+        FSNamesystem.getNamespaceEditsDirs(conf));
     FSNamesystem namesystem = new FSNamesystem(conf, fsImage);
     StartupOption startOpt = NameNode.getStartupOption(conf);
     if (startOpt == StartupOption.RECOVER) {
@@ -865,7 +888,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
           "of the file system meta-data.");
     } else if (dirNames.isEmpty()) {
-      dirNames = Collections.singletonList("file:///tmp/hadoop/dfs/name");
+      dirNames = Collections.singletonList(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
     }
     return Util.stringCollectionAsURIs(dirNames);
   }
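The heart of the new check, restated as a standalone sketch (simplified from the checkConfiguration method above, which additionally skips the default directory and goes through FSNamesystem.loadFromDisk; the class and method names here are illustrative only):

import java.net.URI;
import java.util.Collection;

public class RequiredDirsCheckSketch {
  /**
   * Simplified version of the validation added above: every required edits
   * directory must also appear in the regular or shared edits directories,
   * otherwise the configuration is rejected.
   */
  static void validate(Collection<URI> requiredEditsDirs,
      Collection<URI> editsDirs, Collection<URI> sharedEditsDirs) {
    for (URI u : requiredEditsDirs) {
      if (!editsDirs.contains(u) && !sharedEditsDirs.contains(u)) {
        throw new IllegalArgumentException(
            "Required edits directory " + u + " not present in "
            + "dfs.namenode.edits.dir or dfs.namenode.shared.edits.dir");
      }
    }
  }
}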
NameNode.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
@@ -782,6 +783,26 @@ public class NameNode {
     return initializeSharedEdits(conf, force, false);
   }
 
+  /**
+   * Clone the supplied configuration but remove the shared edits dirs.
+   *
+   * @param conf Supplies the original configuration.
+   * @return Cloned configuration without the shared edit dirs.
+   * @throws IOException on failure to generate the configuration.
+   */
+  private static Configuration getConfigurationWithoutSharedEdits(
+      Configuration conf)
+      throws IOException {
+    List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
+    String editsDirsString = Joiner.on(",").join(editsDirs);
+
+    Configuration confWithoutShared = new Configuration(conf);
+    confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
+    confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        editsDirsString);
+    return confWithoutShared;
+  }
+
   /**
    * Format a new shared edits dir and copy in enough edit log segments so that
    * the standby NN can start up.
@@ -811,11 +832,8 @@ public class NameNode {
 
     NNStorage existingStorage = null;
     try {
-      Configuration confWithoutShared = new Configuration(conf);
-      confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
-      FSNamesystem fsns = FSNamesystem.loadFromDisk(confWithoutShared,
-          FSNamesystem.getNamespaceDirs(conf),
-          FSNamesystem.getNamespaceEditsDirs(conf, false));
+      FSNamesystem fsns =
+          FSNamesystem.loadFromDisk(getConfigurationWithoutSharedEdits(conf));
 
       existingStorage = fsns.getFSImage().getStorage();
       NamespaceInfo nsInfo = existingStorage.getNamespaceInfo();
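A small, self-contained illustration of the clone-and-strip pattern used by the new getConfigurationWithoutSharedEdits helper (the configuration key is real; the class name and quorum URI below are hypothetical, for demonstration only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class SharedEditsStripExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical shared edits quorum URI.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        "qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster");

    // The copy constructor clones the properties, so unsetting a key on the
    // copy leaves the original configuration untouched.
    Configuration withoutShared = new Configuration(conf);
    withoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);

    System.out.println("original : "
        + conf.get(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY));
    System.out.println("stripped : "
        + withoutShared.get(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY));
  }
}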
TestNameEditsConfigs.java
@@ -319,6 +319,88 @@ public class TestNameEditsConfigs {
         new File(storageDir, "current"), NameNodeDirType.IMAGE_AND_EDITS);
   }
 
+  /**
+   * Test edits.dir.required configuration options.
+   * 1. Directory present in dfs.namenode.edits.dir.required but not in
+   *    dfs.namenode.edits.dir. Expected to fail.
+   * 2. Directory present in both dfs.namenode.edits.dir.required and
+   *    dfs.namenode.edits.dir. Expected to succeed.
+   * 3. Directory present only in dfs.namenode.edits.dir. Expected to
+   *    succeed.
+   */
+  @Test
+  public void testNameEditsRequiredConfigs() throws IOException {
+    MiniDFSCluster cluster = null;
+    File nameAndEditsDir = new File(base_dir, "name_and_edits");
+    File nameAndEditsDir2 = new File(base_dir, "name_and_edits2");
+
+    // 1
+    // Bad configuration. Add a directory to dfs.namenode.edits.dir.required
+    // without adding it to dfs.namenode.edits.dir.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          nameAndEditsDir2.toURI().toString());
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+      fail("Successfully started cluster but should not have been able to.");
+    } catch (IllegalArgumentException iae) { // expect to fail
+      LOG.info("EXPECTED: cluster start failed due to bad configuration" + iae);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      cluster = null;
+    }
+
+    // 2
+    // Good configuration. Add a directory to both dfs.namenode.edits.dir.required
+    // and dfs.namenode.edits.dir.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setStrings(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString(),
+          nameAndEditsDir2.toURI().toString());
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          nameAndEditsDir2.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+    // 3
+    // Good configuration. Adds a directory to dfs.namenode.edits.dir but not to
+    // dfs.namenode.edits.dir.required.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setStrings(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString(),
+          nameAndEditsDir2.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * This test tries to simulate failure scenarios.