HDFS-4705. Address HDFS test failures on Windows because of invalid dfs.namenode.name.dir. Contributed by Ivan Mitic.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1476610 13f79535-47bb-0310-9956-ffa450edef68
commit 3e00835dad (parent e2d901f4da)
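The recurring fix in this diff: tests stop relying on the default dfs.namenode.name.dir (or on paths built by string concatenation with a hard-coded "/") and instead set the key to an absolute path built with java.io.File, which uses the platform separator. A minimal standalone sketch of the difference, not part of the commit (class name and base path are illustrative):

import java.io.File;

public class PathDemo {
  public static void main(String[] args) {
    // Hard-coding "/" bakes a Unix separator into the path; on Windows the
    // result mixes "\" and "/" and can be an invalid storage-directory value.
    String concatenated = new File("build").getAbsolutePath() + "/name";

    // The two-argument File constructor applies the platform separator, and
    // getAbsolutePath() anchors the result to the current drive.
    String portable = new File("build", "name").getAbsolutePath();

    System.out.println("concatenated: " + concatenated);
    System.out.println("portable:     " + portable);
  }
}

The TestCheckpoint hunk below is exactly this change: getBaseDirectory() + "/testSeparateEditsDirLocking" becomes new File(getBaseDirectory(), "testSeparateEditsDirLocking").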
CHANGES.txt:
@@ -323,6 +323,9 @@ Trunk (Unreleased)
     HDFS-4722. TestGetConf#testFederation times out on Windows.
     (Ivan Mitic via suresh)

+    HDFS-4705. Address HDFS test failures on Windows because of invalid
+    dfs.namenode.name.dir. (Ivan Mitic via suresh)
+
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS

     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
TestAllowFormat.java:
@@ -169,6 +169,8 @@ public class TestAllowFormat {
     InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
     HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

+    conf.set(DFS_NAMENODE_NAME_DIR_KEY,
+        new File(hdfsDir, "name").getAbsolutePath());
     conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
     conf.set(DFSUtil.addKeySuffixes(
         DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
TestCheckpoint.java:
@@ -750,9 +750,12 @@ public class TestCheckpoint {
   @Test
   public void testSeparateEditsDirLocking() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
-        "/testSeparateEditsDirLocking");
+    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
+    File editsDir = new File(MiniDFSCluster.getBaseDirectory(),
+        "testSeparateEditsDirLocking");

+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir.getAbsolutePath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
         editsDir.getAbsolutePath());
     MiniDFSCluster cluster = null;
TestFSNamesystem.java:
@@ -19,20 +19,30 @@
 package org.apache.hadoop.hdfs.server.namenode;

 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.junit.Assert.assertEquals;

+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.junit.After;
 import org.junit.Test;

 public class TestFSNamesystem {

+  @After
+  public void cleanUp() {
+    FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
+  }
+
   /**
    * Tests that the namenode edits dirs are gotten with duplicates removed
    */
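The new @After hook wipes the shared MiniDFSCluster base directory after each test so one test's name/edits directories cannot leak into the next. A hedged variation (illustrative class name) that also checks the boolean FileUtil.fullyDeleteContents returns, since on Windows an open handle can keep a file undeletable and silently poison later tests:

import java.io.File;

import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;

public class CleanupAfterEachTest {
  // Runs after every test method; fails fast if the shared test directory
  // could not be fully cleaned.
  @After
  public void cleanUp() {
    File base = new File(MiniDFSCluster.getBaseDirectory());
    if (!FileUtil.fullyDeleteContents(base)) {
      throw new AssertionError("could not fully clean " + base);
    }
  }
}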
@@ -54,6 +64,9 @@ public class TestFSNamesystem {
   @Test
   public void testFSNamespaceClearLeases() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
+    conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
+
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
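The same recipe repeats in the hunks that follow: create a name directory under the test base directory, point dfs.namenode.name.dir at its absolute path, then format. A self-contained sketch of that sequence (hypothetical class name, assuming the HDFS test jars are on the classpath):

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class FormatWithExplicitNameDir {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // An absolute, platform-correct image directory, set before formatting
    // instead of relying on the default value.
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());
    DFSTestUtil.formatNameNode(conf);
    System.out.println("formatted name dir: " + nameDir.getAbsolutePath());
  }
}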
TestNNThroughputBenchmark.java:
@@ -17,23 +17,35 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

+import java.io.File;
 import java.util.Arrays;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
 import org.junit.Test;

 public class TestNNThroughputBenchmark {

+  @After
+  public void cleanUp() {
+    FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
+  }
+
   /**
    * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
    */
   @Test
   public void testNNThroughput() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir.getAbsolutePath());
     FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     DFSTestUtil.formatNameNode(conf);
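Aside: the benchmark binds with "hdfs://localhost:" + 0 and "0.0.0.0:0"; port 0 asks the OS for a free ephemeral port, so the test cannot collide with another service. A tiny plain-Java demonstration of the convention (not HDFS code):

import java.net.ServerSocket;

public class EphemeralPortDemo {
  public static void main(String[] args) throws Exception {
    // Requesting port 0 lets the OS choose any free port.
    try (ServerSocket s = new ServerSocket(0)) {
      System.out.println("bound to ephemeral port " + s.getLocalPort());
    }
  }
}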
TestNameEditsConfigs.java:
@@ -322,12 +322,15 @@ public class TestNameEditsConfigs {
     MiniDFSCluster cluster = null;
     File nameAndEditsDir = new File(base_dir, "name_and_edits");
     File nameAndEditsDir2 = new File(base_dir, "name_and_edits2");
+    File nameDir = new File(base_dir, "name");

     // 1
     // Bad configuration. Add a directory to dfs.namenode.edits.dir.required
     // without adding it to dfs.namenode.edits.dir.
     try {
       Configuration conf = new HdfsConfiguration();
+      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+          nameDir.getAbsolutePath());
       conf.set(
           DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
           nameAndEditsDir2.toURI().toString());
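For reference, the misconfiguration this block constructs names a directory in dfs.namenode.edits.dir.required without listing it in dfs.namenode.edits.dir, which the test expects NameNode startup to reject. A compact sketch of just the offending configuration (hypothetical class name and paths; it only prints the conf and does not start a NameNode):

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class RequiredEditsDirDemo {
  public static void main(String[] args) {
    File nameDir = new File("target/test/name");
    File requiredDir = new File("target/test/edits_required");

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());
    // Required, but deliberately absent from dfs.namenode.edits.dir: per the
    // test's comment, a NameNode given this conf should fail validation.
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
        requiredDir.toURI().toString());
    System.out.println("required edits dirs: "
        + conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY));
  }
}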
@@ -353,6 +356,8 @@
     // and dfs.namenode.edits.dir.
     try {
       Configuration conf = new HdfsConfiguration();
+      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+          nameDir.getAbsolutePath());
       conf.setStrings(
           DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           nameAndEditsDir.toURI().toString(),
@@ -375,6 +380,8 @@
     // dfs.namenode.edits.dir.required.
     try {
       Configuration conf = new HdfsConfiguration();
+      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+          nameDir.getAbsolutePath());
       conf.setStrings(
           DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           nameAndEditsDir.toURI().toString(),
TestValidateConfigurationSettings.java:
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
 import org.junit.Test;

 /**
@@ -39,6 +40,11 @@
  */
 public class TestValidateConfigurationSettings {

+  @After
+  public void cleanUp() {
+    FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
+  }
+
   /**
    * Tests setting the rpc port to the same as the web port to test that
    * an exception
@@ -49,6 +55,10 @@ public class TestValidateConfigurationSettings {
       throws IOException {

     Configuration conf = new HdfsConfiguration();
+    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir.getAbsolutePath());
+
     // set both of these to port 9000, should fail
     FileSystem.setDefaultUri(conf, "hdfs://localhost:9000");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:9000");
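A hedged sketch of the collision scenario this hunk configures: the RPC and HTTP endpoints share port 9000 and the test expects NameNode construction to fail (catching IOException here is an assumption; the hunk itself only shows the configuration):

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class PortCollisionDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());
    // Same port for the RPC endpoint and the HTTP server.
    FileSystem.setDefaultUri(conf, "hdfs://localhost:9000");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:9000");
    DFSTestUtil.formatNameNode(conf);
    try {
      new NameNode(conf);
      System.out.println("unexpected: NameNode started");
    } catch (IOException expected) {
      System.out.println("rejected as expected: " + expected);
    }
  }
}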
@@ -72,6 +82,10 @@ public class TestValidateConfigurationSettings {
       throws IOException {

     Configuration conf = new HdfsConfiguration();
+    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir.getAbsolutePath());
+
     FileSystem.setDefaultUri(conf, "hdfs://localhost:8000");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:9000");
     DFSTestUtil.formatNameNode(conf);