HDFS-2799. Trim fs.checkpoint.dir values. Contributed by Amith D K

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325963 13f79535-47bb-0310-9956-ffa450edef68
Author: Eli Collins
Date:   2012-04-13 21:31:27 +00:00
Parent: 27ea3ab6ba
Commit: 5a20d446cf
3 changed files with 48 additions and 3 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -509,6 +509,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3256. HDFS considers blocks under-replicated if topology script is
     configured with only 1 rack. (atm)
 
+    HDFS-2799. Trim fs.checkpoint.dir values. (Amith D K via eli)
+
 BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -1076,7 +1076,8 @@ public class FSImage implements Closeable {
    */
   static Collection<URI> getCheckpointDirs(Configuration conf,
       String defaultValue) {
-    Collection<String> dirNames = conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+    Collection<String> dirNames = conf.getTrimmedStringCollection(
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     if (dirNames.size() == 0 && defaultValue != null) {
       dirNames.add(defaultValue);
     }
@@ -1085,8 +1086,8 @@ public class FSImage implements Closeable {
 
   static List<URI> getCheckpointEditsDirs(Configuration conf,
       String defaultName) {
-    Collection<String> dirNames =
-      conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
+    Collection<String> dirNames = conf.getTrimmedStringCollection(
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
     if (dirNames.size() == 0 && defaultName != null) {
       dirNames.add(defaultName);
     }
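
For context, the behavioral difference the patch relies on: Configuration.getStringCollection splits the raw property value on commas and keeps any surrounding whitespace, while getTrimmedStringCollection trims each token and drops empty ones. A minimal sketch (not part of the commit; the /data paths are hypothetical) of what each call returns for a multi-line value:

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;

    public class TrimDemo {
      public static void main(String[] args) {
        // Hypothetical value: multi-line XML property values keep their
        // embedded newlines and indentation.
        Configuration conf = new Configuration(false);
        conf.set("dfs.namenode.checkpoint.dir",
            "\n  /data/chkpt1,\n  /data/chkpt2\n");

        // Old call: splits on commas only; tokens keep leading/trailing
        // whitespace, so the checkpoint code would treat "\n  /data/chkpt1"
        // as a directory name.
        Collection<String> raw =
            conf.getStringCollection("dfs.namenode.checkpoint.dir");

        // New call: each token is trimmed and empty tokens are dropped,
        // yielding clean paths: [/data/chkpt1, /data/chkpt2].
        Collection<String> trimmed =
            conf.getTrimmedStringCollection("dfs.namenode.checkpoint.dir");

        System.out.println(raw);
        System.out.println(trimmed);
      }
    }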

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java

@@ -490,4 +490,46 @@ public class TestNameEditsConfigs {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test that dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
+   * tolerate whitespace around their values.
+   */
+  @Test
+  public void testCheckPointDirsAreTrimmed() throws Exception {
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary = null;
+    File checkpointNameDir1 = new File(base_dir, "chkptName1");
+    File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
+    File checkpointNameDir2 = new File(base_dir, "chkptName2");
+    File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
+    File nameDir = new File(base_dir, "name1");
+    String whiteSpace = " \n \n ";
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
+    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
+        + checkpointNameDir1.getPath() + whiteSpace, whiteSpace
+        + checkpointNameDir2.getPath() + whiteSpace);
+    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+        whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
+        + checkpointEditsDir2.getPath() + whiteSpace);
+    cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
+        .numDataNodes(3).build();
+    try {
+      cluster.waitActive();
+      secondary = startSecondaryNameNode(conf);
+      secondary.doCheckpoint();
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY
+          + " must be trimmed", checkpointNameDir1.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY
+          + " must be trimmed", checkpointNameDir2.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
+          + " must be trimmed", checkpointEditsDir1.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
+          + " must be trimmed", checkpointEditsDir2.exists());
+    } finally {
+      secondary.shutdown();
+      cluster.shutdown();
+    }
+  }
 }
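
To exercise just this case locally, assuming a standard Maven/Surefire setup in the hadoop-hdfs module, a run would look like:

    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestNameEditsConfigs

The -Dtest= filter limits Surefire to the named class, so the new testCheckPointDirsAreTrimmed runs alongside the other cases in the class.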