diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index af5a7f4ee95..6f7fa0f0259 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -780,9 +780,7 @@
           <plugin>
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-surefire-plugin</artifactId>
             <configuration>
-              <forkMode>perthread</forkMode>
-              <threadCount>${testsThreadCount}</threadCount>
-              <parallel>classes</parallel>
+              <forkCount>${testsThreadCount}</forkCount>
               <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
             </configuration>
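Surefire deprecated the forkMode=perthread/threadCount/parallel combination in favor of forkCount, which the hunk above adopts: forkCount=${testsThreadCount} runs up to that many forked test JVMs concurrently, while the unchanged argLine keeps -DminiClusterDedicatedDirs=true so each MiniDFSCluster instance works in a dedicated directory.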
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 293de2d515d..2015f2954f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -267,6 +267,8 @@ Release 2.3.0 - UNRELEASED
HDFS-5144. Document time unit to NameNodeMetrics. (Akira Ajisaka via
suresh)
+ HDFS-4491. Parallel testing HDFS. (Andrey Klochkov via cnauroth)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 59abffa0f96..2160d83bac3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -700,5 +700,44 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <profile>
+      <id>parallel-tests</id>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-parallel-tests-dirs</id>
+                <phase>test-compile</phase>
+                <configuration>
+                  <target>
+                    <exec executable="sh">
+                      <arg value="-c"/>
+                      <arg value="for i in {1..${testsThreadCount}}; do mkdir -p ${test.build.data}/$i; mkdir -p ${hadoop.tmp.dir}/$i; done"/>
+                    </exec>
+                  </target>
+                </configuration>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <forkCount>${testsThreadCount}</forkCount>
+              <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
+              <systemPropertyVariables>
+                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+              </systemPropertyVariables>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
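The new profile gives each Surefire fork an isolated workspace: the antrun step pre-creates per-fork data and tmp directories at test-compile time, and the systemPropertyVariables block remaps test.build.data and hadoop.tmp.dir through ${surefire.forkNumber}, which Surefire substitutes with the fork's 1-based index at run time. A sketch of the intended invocation (the thread count is an arbitrary example):

    mvn test -Pparallel-tests -DtestsThreadCount=8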
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
index 9d9cde4dc02..dd5e9c6daa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
-import org.apache.hadoop.hdfs.web.URLUtils;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
@@ -86,6 +86,8 @@ public class HftpFileSystem extends FileSystem
HttpURLConnection.setFollowRedirects(true);
}
+ URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+
public static final Text TOKEN_KIND = new Text("HFTP delegation");
protected UserGroupInformation ugi;
@@ -331,8 +333,8 @@ public class HftpFileSystem extends FileSystem
throws IOException {
query = addDelegationTokenParam(query);
final URL url = getNamenodeURL(path, query);
- final HttpURLConnection connection =
- (HttpURLConnection)URLUtils.openConnection(url);
+ final HttpURLConnection connection;
+ connection = (HttpURLConnection)connectionFactory.openConnection(url);
connection.setRequestMethod("GET");
connection.connect();
return connection;
@@ -352,12 +354,14 @@ public class HftpFileSystem extends FileSystem
}
static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener {
+ URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+
RangeHeaderUrlOpener(final URL url) {
super(url);
}
protected HttpURLConnection openConnection() throws IOException {
- return (HttpURLConnection)URLUtils.openConnection(url);
+ return (HttpURLConnection)connectionFactory.openConnection(url);
}
/** Use HTTP Range header for specifying offset. */
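URLConnectionFactory itself is not part of this excerpt. The following is a minimal sketch reconstructed purely from how the class is used in these hunks (the DEFAULT_CONNECTION_FACTORY constant, the int-timeout constructor exercised by TestHftpURLTimeouts below, openConnection(URL), and setTimeouts(URLConnection)); the default timeout value is an assumption, not the committed source:

    package org.apache.hadoop.hdfs.web;

    import java.io.IOException;
    import java.net.URL;
    import java.net.URLConnection;

    // Sketch: instance-based replacement for the static URLUtils helpers, so a
    // test can swap in a factory with a tiny timeout instead of mutating a
    // global like URLUtils.SOCKET_TIMEOUT.
    public class URLConnectionFactory {
      public static final URLConnectionFactory DEFAULT_CONNECTION_FACTORY =
          new URLConnectionFactory(60 * 1000); // assumed 1-minute default

      private final int socketTimeout;

      public URLConnectionFactory(int socketTimeout) {
        this.socketTimeout = socketTimeout;
      }

      // Opens a connection with this factory's timeouts already applied.
      public URLConnection openConnection(URL url) throws IOException {
        URLConnection connection = url.openConnection();
        setTimeouts(connection);
        return connection;
      }

      public void setTimeouts(URLConnection connection) {
        connection.setConnectTimeout(socketTimeout);
        connection.setReadTimeout(socketTimeout);
      }
    }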
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
index 438d56e52f0..6a3bdba593b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
@@ -40,7 +40,6 @@ import javax.net.ssl.X509TrustManager;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.web.URLUtils;
import org.apache.hadoop.util.Time;
/**
@@ -154,7 +153,8 @@ public class HsftpFileSystem extends HftpFileSystem {
query = addDelegationTokenParam(query);
final URL url = new URL("https", nnUri.getHost(),
nnUri.getPort(), path + '?' + query);
- HttpsURLConnection conn = (HttpsURLConnection)URLUtils.openConnection(url);
+ HttpsURLConnection conn;
+ conn = (HttpsURLConnection)connectionFactory.openConnection(url);
// bypass hostname verification
conn.setHostnameVerifier(new DummyHostnameVerifier());
conn.setRequestMethod("GET");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 9dbb01b3942..8f1c5895f40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -119,13 +119,15 @@ public class WebHdfsFileSystem extends FileSystem
/** SPNEGO authenticator */
private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
+ /** Default connection factory may be overridden in tests to use smaller timeout values */
+ URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
/** Configures connections for AuthenticatedURL */
- private static final ConnectionConfigurator CONN_CONFIGURATOR =
+ private final ConnectionConfigurator CONN_CONFIGURATOR =
new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
- URLUtils.setTimeouts(conn);
+ connectionFactory.setTimeouts(conn);
return conn;
}
};
@@ -479,10 +481,9 @@ public class WebHdfsFileSystem extends FileSystem
final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
conn = new AuthenticatedURL(AUTH, CONN_CONFIGURATOR).openConnection(
url, authToken);
- URLUtils.setTimeouts(conn);
} else {
LOG.debug("open URL connection");
- conn = (HttpURLConnection)URLUtils.openConnection(url);
+ conn = (HttpURLConnection)connectionFactory.openConnection(url);
}
} catch (AuthenticationException e) {
throw new IOException(e);
@@ -577,7 +578,7 @@ public class WebHdfsFileSystem extends FileSystem
checkRetry = false;
//Step 2) Submit another Http request with the URL from the Location header with data.
- conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
+ conn = (HttpURLConnection)connectionFactory.openConnection(new URL(redirect));
conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
connect();
@@ -600,7 +601,7 @@ public class WebHdfsFileSystem extends FileSystem
disconnect();
checkRetry = false;
- conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
+ conn = (HttpURLConnection)connectionFactory.openConnection(new URL(redirect));
connect();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java
index 794c0571944..5ad39304da3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.fs;
+import java.io.File;
import java.io.IOException;
import java.util.EnumSet;
@@ -27,6 +28,7 @@ import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -56,8 +58,7 @@ public class TestFiRename {
private static String addChild = "";
private static byte[] data = { 0 };
- private static String TEST_ROOT_DIR =
- System.getProperty("test.build.data", "/tmp") + "/test";
+ private static String TEST_ROOT_DIR = PathUtils.getTestDirName(TestFiRename.class);
private static Configuration CONF = new Configuration();
static {
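PathUtils is also outside this excerpt (it lands in hadoop-common's test tree under the same JIRA). The tests in this patch rely on two of its methods; a sketch consistent with that usage, with the root-path fallback assumed rather than confirmed:

    package org.apache.hadoop.test;

    import java.io.File;

    // Sketch: derive a per-test-class directory from test.build.data so that
    // test classes running in different Surefire forks never share paths.
    public class PathUtils {
      public static File getTestDir(Class<?> caller) {
        File dir = new File(System.getProperty("test.build.data", "build/test/data"),
            caller.getSimpleName());
        dir.mkdirs();
        return dir;
      }

      public static String getTestDirName(Class<?> caller) {
        return getTestDir(caller).getAbsolutePath();
      }
    }

Because the parallel-tests profile above remaps test.build.data per fork, every test class gets a directory that is private both to the class and to the fork.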
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
index c48759e1f5e..cfd4a8d418c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs;
+import java.io.File;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -48,6 +50,7 @@ import org.junit.Test;
* underlying file system as Hdfs.
*/
public class TestResolveHdfsSymlink {
+ private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestResolveHdfsSymlink.class);
private static MiniDFSCluster cluster = null;
@BeforeClass
@@ -80,12 +83,12 @@ public class TestResolveHdfsSymlink {
.getUri());
Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri()
- .toString(), "/tmp/alpha");
+ .toString(), new File(TEST_ROOT_DIR, "alpha").getAbsolutePath());
DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16,
(short) 1, 2);
Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri()
- .toString(), "/tmp");
+ .toString(), TEST_ROOT_DIR.getAbsolutePath());
Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),
"/tmp/link");
fcHdfs.createSymlink(linkTarget, hdfsLink, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
index d28736cffae..845eb6314ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
@@ -31,6 +31,7 @@ import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
/**
@@ -38,6 +39,8 @@ import org.junit.Test;
*/
public class TestUrlStreamHandler {
+ private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestUrlStreamHandler.class);
+
/**
* Test opening and reading from an InputStream through a hdfs:// URL.
*
@@ -111,13 +114,12 @@ public class TestUrlStreamHandler {
Configuration conf = new HdfsConfiguration();
// Locate the test temporary directory.
- File tmpDir = new File(conf.get("hadoop.tmp.dir"));
- if (!tmpDir.exists()) {
- if (!tmpDir.mkdirs())
- throw new IOException("Cannot create temporary directory: " + tmpDir);
+ if (!TEST_ROOT_DIR.exists()) {
+ if (!TEST_ROOT_DIR.mkdirs())
+ throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
}
- File tmpFile = new File(tmpDir, "thefile");
+ File tmpFile = new File(TEST_ROOT_DIR, "thefile");
URI uri = tmpFile.toURI();
FileSystem fs = FileSystem.get(uri, conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
index e9e14ce8b11..6c7bac31ea8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -39,8 +40,7 @@ import org.junit.Test;
public class TestLoadGenerator extends Configured implements Tool {
private static final Configuration CONF = new HdfsConfiguration();
private static final int DEFAULT_BLOCK_SIZE = 10;
- private static final String OUT_DIR =
- System.getProperty("test.build.data","build/test/data");
+ private static final File OUT_DIR = PathUtils.getTestDir(TestLoadGenerator.class);
private static final File DIR_STRUCTURE_FILE =
new File(OUT_DIR, StructureGenerator.DIR_STRUCTURE_FILE_NAME);
private static final File FILE_STRUCTURE_FILE =
@@ -65,7 +65,7 @@ public class TestLoadGenerator extends Configured implements Tool {
StructureGenerator sg = new StructureGenerator();
String[] args = new String[]{"-maxDepth", "2", "-minWidth", "1",
"-maxWidth", "2", "-numOfFiles", "2",
- "-avgFileSize", "1", "-outDir", OUT_DIR, "-seed", "1"};
+ "-avgFileSize", "1", "-outDir", OUT_DIR.getAbsolutePath(), "-seed", "1"};
final int MAX_DEPTH = 1;
final int MIN_WIDTH = 3;
@@ -133,8 +133,7 @@ public class TestLoadGenerator extends Configured implements Tool {
public void testLoadGenerator() throws Exception {
final String TEST_SPACE_ROOT = "/test";
- final String SCRIPT_TEST_DIR = new File(System.getProperty("test.build.data",
- "/tmp")).getAbsolutePath();
+ final String SCRIPT_TEST_DIR = OUT_DIR.getAbsolutePath();
String script = SCRIPT_TEST_DIR + "/" + "loadgenscript";
String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2";
File scriptFile1 = new File(script);
@@ -156,7 +155,7 @@ public class TestLoadGenerator extends Configured implements Tool {
try {
DataGenerator dg = new DataGenerator();
dg.setConf(CONF);
- String [] args = new String[] {"-inDir", OUT_DIR, "-root", TEST_SPACE_ROOT};
+ String [] args = new String[] {"-inDir", OUT_DIR.getAbsolutePath(), "-root", TEST_SPACE_ROOT};
assertEquals(0, dg.run(args));
final int READ_PROBABILITY = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 7090d498444..222a9b77ef5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1407,6 +1407,13 @@ public class MiniDFSCluster {
* Shutdown all the nodes in the cluster.
*/
public void shutdown() {
+ shutdown(false);
+ }
+
+ /**
+ * Shutdown all the nodes in the cluster; delete the dfs dir too if deleteDfsDir is true.
+ */
+ public void shutdown(boolean deleteDfsDir) {
LOG.info("Shutting down the Mini HDFS Cluster");
if (checkExitOnShutdown) {
if (ExitUtil.terminateCalled()) {
@@ -1426,6 +1433,11 @@ public class MiniDFSCluster {
nameNode = null;
}
}
+ if (deleteDfsDir) {
+ base_dir.delete();
+ } else {
+ base_dir.deleteOnExit();
+ }
}
/**
@@ -2118,7 +2130,7 @@ public class MiniDFSCluster {
* /data/data<2*dnIndex + 1>
* /data/data<2*dnIndex + 2>
*
- *
+ *
* @param dnIndex datanode index (starts from 0)
* @param dirIndex directory index (0 or 1). Index 0 provides access to the
* first storage directory. Index 1 provides access to the second
@@ -2149,7 +2161,7 @@ public class MiniDFSCluster {
public static String getDNCurrentDir(File storageDir) {
return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/";
}
-
+
/**
* Get directory corresponding to block pool directory in the datanode
* @param storageDir the storage directory of a datanode.
@@ -2255,7 +2267,7 @@ public class MiniDFSCluster {
}
return null;
}
-
+
/**
* Get the block metadata file for a block from a given datanode
*
@@ -2343,14 +2355,17 @@ public class MiniDFSCluster {
} else {
if (checkDataNodeAddrConfig) {
conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
- conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
- conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
} else {
conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
- conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
- conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
}
}
+ if (checkDataNodeAddrConfig) {
+ conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+ conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+ } else {
+ conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+ conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+ }
}
private void addToFile(String p, String address) throws IOException {
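A hypothetical caller illustrating the shutdown(boolean) overload introduced above ('conf' is any valid cluster configuration):

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      // ... exercise the cluster ...
    } finally {
      cluster.shutdown(true);  // stop NN/DNs and delete base_dir immediately
      // cluster.shutdown();   // same as shutdown(false): base_dir removed only on JVM exit
    }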
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
index 51fab6653f3..74c763d5aa7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
@@ -71,10 +71,6 @@ public class TestClientReportBadBlock {
@Before
public void startUpCluster() throws IOException {
- if (System.getProperty("test.build.data") == null) { // to allow test to be
- // run outside of Ant
- System.setProperty("test.build.data", "build/test/data");
- }
// disable block scanner
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index e54e2777c44..fbcce3946ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -20,9 +20,6 @@ package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.junit.Assert.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index e7d82688a9d..42873785f37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
@@ -61,9 +62,7 @@ public class TestDFSShell {
private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
private static AtomicInteger counter = new AtomicInteger();
- static final String TEST_ROOT_DIR =
- new Path(System.getProperty("test.build.data","/tmp"))
- .toString().replace(' ', '+');
+ static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class);
static Path writeFile(FileSystem fs, Path f) throws IOException {
DataOutputStream out = fs.create(f);
@@ -482,12 +481,11 @@ public class TestDFSShell {
Configuration dstConf = new HdfsConfiguration();
MiniDFSCluster srcCluster = null;
MiniDFSCluster dstCluster = null;
- String bak = System.getProperty("test.build.data");
+ File bak = new File(PathUtils.getTestDir(getClass()), "dfs_tmp_uri");
+ bak.mkdirs();
try{
srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
- File nameDir = new File(new File(bak), "dfs_tmp_uri/");
- nameDir.mkdirs();
- System.setProperty("test.build.data", nameDir.toString());
+ dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath());
dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
FileSystem srcFs = srcCluster.getFileSystem();
FileSystem dstFs = dstCluster.getFileSystem();
@@ -559,7 +557,6 @@ public class TestDFSShell {
ret = ToolRunner.run(shell, argv);
assertEquals("default works for rm/rmr", 0, ret);
} finally {
- System.setProperty("test.build.data", bak);
if (null != srcCluster) {
srcCluster.shutdown();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 05d22961dd0..9c2e038f3f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -75,7 +76,7 @@ public class TestDecommission {
// Set up the hosts/exclude files.
localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
- Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/work-dir/decommission");
+ Path dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
hostsFile = new Path(dir, "hosts");
excludeFile = new Path(dir, "exclude");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 9b4f3130d7f..c0093d21aa7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.fail;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
+
+import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
@@ -73,6 +75,9 @@ public class TestDistributedFileSystem {
HdfsConfiguration conf;
if (noXmlDefaults) {
conf = new HdfsConfiguration(false);
+ String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
} else {
conf = new HdfsConfiguration();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
index 2f2c9a4fd6e..5fc567a2131 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
/**
@@ -260,7 +261,7 @@ public class TestFSInputChecker {
// create a file and verify that checksum corruption results in
// a checksum exception on LocalFS
- String dir = System.getProperty("test.build.data", ".");
+ String dir = PathUtils.getTestDirName(getClass());
Path file = new Path(dir + "/corruption-test.dat");
Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
index d086c77a9bf..570b19f8d2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
/**
@@ -145,7 +146,7 @@ public class TestFileAppendRestart {
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + HADOOP_23_BROKEN_APPEND_TGZ;
- String testDir = System.getProperty("test.build.data", "build/test/data");
+ String testDir = PathUtils.getTestDirName(getClass());
File dfsDir = new File(testDir, "image-with-buggy-append");
if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 458880af566..6531fe7c050 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -95,7 +96,7 @@ public class TestFileCorruption {
@Test
public void testLocalFileCorruption() throws Exception {
Configuration conf = new HdfsConfiguration();
- Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
+ Path file = new Path(PathUtils.getTestDirName(getClass()), "corruptFile");
FileSystem fs = FileSystem.getLocal(conf);
DataOutputStream dos = fs.create(file);
dos.writeBytes("original bytes");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index 036252ddc31..59d1615025d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.namenode.BackupNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
/**
@@ -53,6 +54,9 @@ public class TestHDFSServerPorts {
// reset default 0.0.0.0 addresses in order to avoid IPv6 problem
static final String THIS_HOST = getFullHostName() + ":0";
+
+ private static final File TEST_DATA_DIR = PathUtils.getTestDir(TestHDFSServerPorts.class);
+
static {
DefaultMetricsSystem.setMiniClusterMode(true);
}
@@ -81,13 +85,6 @@ public class TestHDFSServerPorts {
}
}
- /**
- * Get base directory these tests should run in.
- */
- private String getTestingDir() {
- return System.getProperty("test.build.data", "build/test/data");
- }
-
public NameNode startNameNode() throws IOException {
return startNameNode(false);
}
@@ -95,8 +92,7 @@ public class TestHDFSServerPorts {
* Start the namenode.
*/
public NameNode startNameNode(boolean withService) throws IOException {
- String dataDir = getTestingDir();
- hdfsDir = new File(dataDir, "dfs");
+ hdfsDir = new File(TEST_DATA_DIR, "dfs");
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
@@ -119,9 +115,8 @@ public class TestHDFSServerPorts {
* Start the BackupNode
*/
public BackupNode startBackupNode(Configuration conf) throws IOException {
- String dataDir = getTestingDir();
// Set up testing environment directories
- hdfsDir = new File(dataDir, "backupNode");
+ hdfsDir = new File(TEST_DATA_DIR, "backupNode");
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
@@ -150,8 +145,7 @@ public class TestHDFSServerPorts {
*/
public DataNode startDataNode(int index, Configuration config)
throws IOException {
- String dataDir = getTestingDir();
- File dataNodeDir = new File(dataDir, "data-" + index);
+ File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index);
config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());
String[] args = new String[] {};
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
index d9a22c10111..56bd21ef2ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
@@ -33,16 +33,11 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.web.URLUtils;
-import org.junit.BeforeClass;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.junit.Test;
public class TestHftpURLTimeouts {
- @BeforeClass
- public static void setup() {
- URLUtils.SOCKET_TIMEOUT = 5;
- }
-
+
@Test
public void testHftpSocketTimeout() throws Exception {
Configuration conf = new Configuration();
@@ -51,9 +46,11 @@ public class TestHftpURLTimeouts {
InetAddress.getByName(null).getHostAddress(),
socket.getLocalPort(),
null, null, null);
- boolean timedout = false;
HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf);
+ fs.connectionFactory = new URLConnectionFactory(5);
+
+ boolean timedout = false;
try {
HttpURLConnection conn = fs.openConnection("/", "");
timedout = false;
@@ -69,6 +66,7 @@ public class TestHftpURLTimeouts {
assertTrue("read timedout", timedout);
assertTrue("connect timedout", checkConnectTimeout(fs, false));
} finally {
+ fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
fs.close();
}
}
@@ -84,6 +82,8 @@ public class TestHftpURLTimeouts {
boolean timedout = false;
HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf);
+ fs.connectionFactory = new URLConnectionFactory(5);
+
try {
HttpURLConnection conn = null;
timedout = false;
@@ -100,6 +100,7 @@ public class TestHftpURLTimeouts {
assertTrue("ssl read connect timedout", timedout);
assertTrue("connect timedout", checkConnectTimeout(fs, true));
} finally {
+ fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
fs.close();
}
}
@@ -121,7 +122,7 @@ public class TestHftpURLTimeouts {
// https will get a read timeout due to SSL negotiation, but
// a normal http will not, so need to ignore SSL read timeouts
// until a connect timeout occurs
- if (!(ignoreReadTimeout && message.equals("Read timed out"))) {
+ if (!(ignoreReadTimeout && "Read timed out".equals(message))) {
timedout = true;
assertEquals("connect timed out", message);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index 1400f07e062..84678da2d40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -25,8 +25,8 @@ import static org.junit.Assume.assumeTrue;
import java.io.File;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.junit.After;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
@@ -43,20 +43,10 @@ public class TestMiniDFSCluster {
private static final String CLUSTER_3 = "cluster3";
private static final String CLUSTER_4 = "cluster4";
private static final String CLUSTER_5 = "cluster5";
- protected String testDataPath;
- protected File testDataDir;
+ protected File testDataPath;
@Before
public void setUp() {
- testDataPath = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA,
- "build/test/data");
- testDataDir = new File(new File(testDataPath).getParentFile(),
- "miniclusters");
-
-
- }
- @After
- public void tearDown() {
- System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testDataPath);
+ testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
}
/**
@@ -120,7 +110,7 @@ public class TestMiniDFSCluster {
MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
try {
DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem();
- dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
+ dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
cluster4.shutdown();
} finally {
while(cluster4.isClusterUp()){
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
index 5077a6dfdfd..424cc77a19d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -322,7 +323,7 @@ public class TestPersistBlocks {
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
- String testDir = System.getProperty("test.build.data", "build/test/data");
+ String testDir = PathUtils.getTestDirName(getClass());
File dfsDir = new File(testDir, "image-1.0");
if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
index cfe463c4356..a7eed468e62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
@@ -174,6 +174,7 @@ public class TestNNWithQJM {
public void testMismatchedNNIsRejected() throws Exception {
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
+ String defaultEditsDir = conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
mjc.getQuorumJournalURI("myjournal").toString());
@@ -187,7 +188,7 @@ public class TestNNWithQJM {
// Reformat just the on-disk portion
Configuration onDiskOnly = new Configuration(conf);
- onDiskOnly.unset(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
+ onDiskOnly.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, defaultEditsDir);
NameNode.format(onDiskOnly);
// Start the NN - should fail because the JNs are still formatted
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index e6e140443bf..79ca6ca72bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Before;
@@ -61,13 +62,13 @@ public class TestJournalNode {
private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);
+ private static File TEST_BUILD_DATA = PathUtils.getTestDir(TestJournalNode.class);
+
private JournalNode jn;
private Journal journal;
private Configuration conf = new Configuration();
private IPCLoggerChannel ch;
private String journalId;
- private File TEST_BUILD_DATA =
- new File(System.getProperty("test.build.data", "build/test/data"));
static {
// Avoid an error when we double-initialize JvmMetrics
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index 79785961c91..972a785f5d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
-import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index ba6c3737266..5f15d0e2cb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -92,8 +93,7 @@ public class TestReplicationPolicy {
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
- File baseDir = new File(System.getProperty(
- "test.build.data", "build/test/data"), "dfs/");
+ File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 032c2c08396..c453f198e1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -41,6 +40,9 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
public class TestReplicationPolicyWithNodeGroup {
@@ -48,10 +50,10 @@ public class TestReplicationPolicyWithNodeGroup {
private static final int NUM_OF_DATANODES = 8;
private static final int NUM_OF_DATANODES_BOUNDARY = 6;
private static final int NUM_OF_DATANODES_MORE_TARGETS = 12;
- private static final Configuration CONF = new HdfsConfiguration();
- private static final NetworkTopology cluster;
- private static final NameNode namenode;
- private static final BlockPlacementPolicy replicator;
+ private final Configuration CONF = new HdfsConfiguration();
+ private NetworkTopology cluster;
+ private NameNode namenode;
+ private BlockPlacementPolicy replicator;
private static final String filename = "/dummyfile.txt";
private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
@@ -94,27 +96,23 @@ public class TestReplicationPolicyWithNodeGroup {
private final static DatanodeDescriptor NODE =
new DatanodeDescriptor(DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d2/r4/n7"));
- static {
- try {
- FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
- CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
- // Set properties to make HDFS aware of NodeGroup.
- CONF.set("dfs.block.replicator.classname",
- "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup");
- CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
- "org.apache.hadoop.net.NetworkTopologyWithNodeGroup");
-
- File baseDir = new File(System.getProperty(
- "test.build.data", "build/test/data"), "dfs/");
- CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
- new File(baseDir, "name").getPath());
-
- DFSTestUtil.formatNameNode(CONF);
- namenode = new NameNode(CONF);
- } catch (IOException e) {
- e.printStackTrace();
- throw (RuntimeException)new RuntimeException().initCause(e);
- }
+ @Before
+ public void setUp() throws Exception {
+ FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
+ CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+ // Set properties to make HDFS aware of NodeGroup.
+ CONF.set("dfs.block.replicator.classname",
+ "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup");
+ CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
+ "org.apache.hadoop.net.NetworkTopologyWithNodeGroup");
+
+ File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
+
+ CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ new File(baseDir, "name").getPath());
+
+ DFSTestUtil.formatNameNode(CONF);
+ namenode = new NameNode(CONF);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
replicator = bm.getBlockPlacementPolicy();
cluster = bm.getDatanodeManager().getNetworkTopology();
@@ -125,6 +123,11 @@ public class TestReplicationPolicyWithNodeGroup {
setupDataNodeCapacity();
}
+ @After
+ public void tearDown() throws Exception {
+ namenode.stop();
+ }
+
private static void setupDataNodeCapacity() {
for(int i=0; i<NUM_OF_DATANODES; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
ArrayList<URI> fsImageDirs = new ArrayList<URI>();
ArrayList<URI> editsDirs = new ArrayList<URI>();
File filePath =
- new File(System.getProperty("test.build.data","/tmp"), "storageDirToCheck");
+ new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
assertTrue("Couldn't create directory storageDirToCheck",
filePath.exists() || filePath.mkdirs());
fsImageDirs.add(filePath.toURI());
@@ -1911,9 +1912,11 @@ public class TestCheckpoint {
}
// Start a new NN with the same host/port.
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true)
- .build();
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0)
+ .nameNodePort(origPort)
+ .nameNodeHttpPort(origHttpPort)
+ .format(true).build();
try {
secondary.doCheckpoint();
@@ -2135,7 +2138,8 @@ public class TestCheckpoint {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
try {
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0)
.format(true).build();
FileSystem fs = cluster.getFileSystem();
secondary = startSecondaryNameNode(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
index 4330317d6ff..7c23dd55df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.After;
@@ -72,7 +73,7 @@ public class TestClusterId {
public void setUp() throws IOException {
ExitUtil.disableSystemExit();
- String baseDir = System.getProperty("test.build.data", "build/test/data");
+ String baseDir = PathUtils.getTestDirName(getClass());
hdfsDir = new File(baseDir, "dfs/name");
if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index f83a531b463..1be4a228895 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
@@ -96,9 +97,8 @@ public class TestEditLog {
static final int NUM_TRANSACTIONS = 100;
static final int NUM_THREADS = 100;
- static final File TEST_DIR = new File(
- System.getProperty("test.build.data","build/test/data"));
-
+ static final File TEST_DIR = PathUtils.getTestDir(TestEditLog.class);
+
/** An edits log with 3 edits from 0.20 - the result of
* a fresh namesystem followed by hadoop fs -touchz /myfile */
static final byte[] HADOOP20_SOME_EDITS =
@@ -569,6 +569,7 @@ public class TestEditLog {
fail("should not be able to start");
} catch (IOException e) {
// expected
+ assertNotNull("Cause of exception should be ChecksumException", e.getCause());
assertEquals("Cause of exception should be ChecksumException",
ChecksumException.class, e.getCause().getClass());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
index a6e170dee71..e230d5affc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
@@ -34,8 +35,7 @@ import org.junit.Test;
* Test the EditLogFileOutputStream
*/
public class TestEditLogFileOutputStream {
- private final static File TEST_DIR =
- new File(System.getProperty("test.build.data", "/tmp"));
+ private final static File TEST_DIR = PathUtils.getTestDir(TestEditLogFileOutputStream.class);
private static final File TEST_EDITS =
new File(TEST_DIR, "testEditLogFileOutput.log");
final static int MIN_PREALLOCATION_LENGTH =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
index 486b17c3925..610a4a2fd4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.After;
import org.junit.Before;
@@ -192,8 +193,7 @@ public class TestEditLogJournalFailures {
Configuration conf = new HdfsConfiguration();
String[] nameDirs = new String[4];
for (int i = 0; i < nameDirs.length; i++) {
- File nameDir = new File(System.getProperty("test.build.data"),
- "name-dir" + i);
+ File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i);
nameDir.mkdirs();
nameDirs[i] = nameDir.getAbsolutePath();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index dd637a9212d..4db7e6a7dfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -60,8 +61,7 @@ public class TestFSEditLogLoader {
((Log4JLogger)FSEditLogLoader.LOG).getLogger().setLevel(Level.ALL);
}
- private static final File TEST_DIR = new File(
- System.getProperty("test.build.data","build/test/data"));
+ private static final File TEST_DIR = PathUtils.getTestDir(TestFSEditLogLoader.class);
private static final int NUM_DATA_NODES = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 754e56966d3..9aaeb74a1c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -91,7 +91,7 @@ import static org.mockito.Mockito.*;
*/
public class TestFsck {
static final String auditLogFile = System.getProperty("test.build.dir",
- "build/test") + "/audit.log";
+ "build/test") + "/TestFsck-audit.log";
// Pattern for:
// allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
@@ -159,7 +159,8 @@ public class TestFsck {
cluster.shutdown();
// restart the cluster; bring up namenode but not the data nodes
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0).format(false).build();
outStr = runFsck(conf, 1, true, "/");
// expect the result is corrupt
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
index 4e4914b3ab6..5c11c1badf2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
@@ -60,7 +61,7 @@ public class TestNameEditsConfigs {
short replication = 3;
private File base_dir = new File(
- System.getProperty("test.build.data", "build/test/data"), "dfs/");
+ PathUtils.getTestDir(TestNameEditsConfigs.class), "dfs");
@Before
public void setUp() throws IOException {
@@ -68,7 +69,7 @@ public class TestNameEditsConfigs {
throw new IOException("Cannot remove directory " + base_dir);
}
}
-
+
void checkImageAndEditsFilesExistence(File dir,
boolean shouldHaveImages,
boolean shouldHaveEdits)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 42f3f5764f9..30d3e71ad8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
@@ -57,8 +58,7 @@ import com.google.common.collect.Sets;
public class TestNameNodeRecovery {
private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
private static StartupOption recoverStartOpt = StartupOption.RECOVER;
- private static final File TEST_DIR = new File(
- System.getProperty("test.build.data","build/test/data"));
+ private static final File TEST_DIR = PathUtils.getTestDir(TestNameNodeRecovery.class);
static {
recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
index e73d71aff70..2012b6aabe1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
@@ -33,12 +33,14 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Time;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestNameNodeResourceChecker {
+ private final static File BASE_DIR = PathUtils.getTestDir(TestNameNodeResourceChecker.class);
private Configuration conf;
private File baseDir;
private File nameDir;
@@ -46,8 +48,7 @@ public class TestNameNodeResourceChecker {
@Before
public void setUp () throws IOException {
conf = new Configuration();
- baseDir = new File(System.getProperty("test.build.data"));
- nameDir = new File(baseDir, "resource-check-name-dir");
+ nameDir = new File(BASE_DIR, "resource-check-name-dir");
nameDir.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
}
@@ -141,8 +142,8 @@ public class TestNameNodeResourceChecker {
@Test
public void testChecking2NameDirsOnOneVolume() throws IOException {
Configuration conf = new Configuration();
- File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
- File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+ File nameDir1 = new File(BASE_DIR, "name-dir1");
+ File nameDir2 = new File(BASE_DIR, "name-dir2");
nameDir1.mkdirs();
nameDir2.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
@@ -162,7 +163,7 @@ public class TestNameNodeResourceChecker {
@Test
public void testCheckingExtraVolumes() throws IOException {
Configuration conf = new Configuration();
- File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
+ File nameDir = new File(BASE_DIR, "name-dir");
nameDir.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
@@ -182,8 +183,8 @@ public class TestNameNodeResourceChecker {
@Test
public void testLowResourceVolumePolicy() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
- File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
- File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+ File nameDir1 = new File(BASE_DIR, "name-dir1");
+ File nameDir2 = new File(BASE_DIR, "name-dir2");
nameDir1.mkdirs();
nameDir2.mkdirs();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 6d6dbdf79ce..86323ff5adb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import org.junit.After;
@@ -404,8 +405,7 @@ public class TestStartup {
Configuration conf = new Configuration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
- File base_dir = new File(System.getProperty(
- "test.build.data", "build/test/data"), "dfs/");
+ File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(base_dir, "name").getPath());
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
index 905e3680e0b..1a612e83ec7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
@@ -391,7 +391,8 @@ public class TestStorageRestore {
(new File(path3, "current").getAbsolutePath()) : path3.toString();
try {
- cluster = new MiniDFSCluster.Builder(config).numDataNodes(0)
+ cluster = new MiniDFSCluster.Builder(config)
+ .numDataNodes(0)
.manageNameDfsDirs(false).build();
cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
index cba634fee92..5a178d19440 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.http.HttpServerFunctionalTest;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import org.mockito.Mockito;
@@ -48,8 +49,7 @@ import com.google.common.collect.ImmutableList;
public class TestTransferFsImage {
- private static final File TEST_DIR = new File(
- System.getProperty("test.build.data","build/test/data"));
+ private static final File TEST_DIR = PathUtils.getTestDir(TestTransferFsImage.class);
/**
* Regression test for HDFS-1997. Test that, if an exception
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index 678e03866d5..502c9de4096 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -56,8 +56,8 @@ public class TestBootstrapStandby {
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
- .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
- .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+ .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
+ .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
index ce005b10a14..18972655765 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
@@ -70,13 +70,13 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
0);
- conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10003);
- conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10004);
+ conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10023);
+ conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10024);
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
- .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
- .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+ .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021))
+ .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
.numDataNodes(0)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index 8675fa3fc6f..8c61c9237e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -124,8 +124,8 @@ public class TestEditLogTailer {
// Have to specify IPC ports so the NNs can talk to each other.
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
- .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
- .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+ .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
+ .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 37c0b16fabd..5ec7f7e1c1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -76,8 +76,8 @@ public class TestFailureToReadEdits {
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
- .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
- .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+ .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10041))
+ .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10042)));
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
.numDataNodes(0)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
index 49d89592b8a..4f848dcf834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
@@ -52,8 +52,8 @@ public class TestHAFsck {
// need some HTTP ports
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ha-nn-uri-0")
- .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
- .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+ .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10051))
+ .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10052)));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index dff28740690..3ff5d54dc66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -89,8 +89,8 @@ public class TestStandbyCheckpoints {
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
- .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
- .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+ .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
+ .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
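The six HA test hunks above follow one convention: each test class abandons the shared 10001/10002 pair and claims its own block of ports, so two classes running in parallel forks can never race for the same bind. Purely as a hypothetical illustration (no such class exists in the patch), the reserved blocks could be recorded in one place like this:

    /**
     * Hypothetical constants class, not part of this patch: it only documents
     * the port blocks the HA test hunks actually use, one block per class.
     */
    public final class HATestPortBlocks {

      private HATestPortBlocks() {
        // constants only; never instantiated
      }

      public static final int BOOTSTRAP_STANDBY_HTTP = 20001;     // nn2 uses +1
      public static final int ZK_FAILOVER_IPC = 10021;            // ZKFC ports at +2 and +3
      public static final int EDIT_LOG_TAILER_IPC = 10031;
      public static final int FAILURE_TO_READ_EDITS_HTTP = 10041;
      public static final int HA_FSCK_HTTP = 10051;
      public static final int STANDBY_CHECKPOINTS_HTTP = 10061;
    }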
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 1d189a108de..e6c9a3f3967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
@@ -53,7 +54,7 @@ public class TestOfflineEditsViewer {
}
private static String buildDir =
- System.getProperty("test.build.data", "build/test/data");
+ PathUtils.getTestDirName(TestOfflineEditsViewer.class);
private static String cacheDir =
System.getProperty("test.cache.data", "build/test/cache");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
index a5501d97547..c7d3b31dacf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
@@ -27,6 +27,7 @@ import java.io.FileReader;
import java.io.IOException;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
/**
@@ -34,7 +35,7 @@ import org.junit.Test;
* on predetermined inputs
*/
public class TestDelimitedImageVisitor {
- private static String ROOT = System.getProperty("test.build.data","/tmp");
+ private static String ROOT = PathUtils.getTestDirName(TestDelimitedImageVisitor.class);
private static final String delim = "--";
// Record an element in the visitor and build the expected line in the output
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 50e816417d3..11aa3b821f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.PathUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -88,8 +89,7 @@ public class TestOfflineImageViewer {
   final static HashMap<String, FileStatus> writtenFiles =
       new HashMap<String, FileStatus>();
- private static String ROOT = System.getProperty("test.build.data",
- "build/test/data");
+ private static String ROOT = PathUtils.getTestDirName(TestOfflineImageViewer.class);
// Create a populated namespace for later testing. Save its contents to a
// data structure and store its fsimage location.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
index ebbb4e22701..2a9465aa90c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
@@ -30,6 +30,7 @@ import java.io.OutputStream;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
@@ -40,10 +41,7 @@ public class TestAtomicFileOutputStream {
private static final String TEST_STRING = "hello world";
private static final String TEST_STRING_2 = "goodbye world";
- private static File BASE_DIR = new File(
- System.getProperty("test.build.data", "build/test/data"));
- private static File TEST_DIR = new File(BASE_DIR,
- TestAtomicFileOutputStream.class.getName());
+ private static File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);
private static File DST_FILE = new File(TEST_DIR, "test.txt");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
index 6f5b1613360..35fa46d20cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
@@ -29,14 +29,12 @@ import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
public class TestMD5FileUtils {
- private static final File TEST_DIR_ROOT = new File(
- System.getProperty("test.build.data","build/test/data"));
- private static final File TEST_DIR = new File(TEST_DIR_ROOT,
- "TestMD5FileUtils");
+ private static final File TEST_DIR = PathUtils.getTestDir(TestMD5FileUtils.class);
private static final File TEST_FILE = new File(TEST_DIR,
"testMd5File.dat");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 2071f6feb87..7a007a05928 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -58,7 +58,6 @@ public class TestWebHdfsTimeouts {
private static final int CLIENTS_TO_CONSUME_BACKLOG = 100;
private static final int CONNECTION_BACKLOG = 1;
- private static final int INITIAL_SOCKET_TIMEOUT = URLUtils.SOCKET_TIMEOUT;
private static final int SHORT_SOCKET_TIMEOUT = 5;
private static final int TEST_TIMEOUT = 10000;
@@ -67,20 +66,22 @@ public class TestWebHdfsTimeouts {
private InetSocketAddress nnHttpAddress;
private ServerSocket serverSocket;
private Thread serverThread;
+ private URLConnectionFactory connectionFactory = new URLConnectionFactory(SHORT_SOCKET_TIMEOUT);
@Before
public void setUp() throws Exception {
- URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT;
Configuration conf = WebHdfsTestUtil.createConf();
nnHttpAddress = NameNode.getHttpAddress(conf);
serverSocket = new ServerSocket(nnHttpAddress.getPort(), CONNECTION_BACKLOG);
fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+ fs.connectionFactory = connectionFactory;
     clients = new ArrayList<SocketChannel>();
serverThread = null;
}
@After
public void tearDown() throws Exception {
+ fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
IOUtils.cleanup(LOG, fs);
if (serverSocket != null) {
@@ -240,7 +241,7 @@ public class TestWebHdfsTimeouts {
*/
private void startSingleTemporaryRedirectResponseThread(
final boolean consumeConnectionBacklog) {
- URLUtils.SOCKET_TIMEOUT = INITIAL_SOCKET_TIMEOUT;
+ fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
serverThread = new Thread() {
@Override
public void run() {
@@ -254,7 +255,7 @@ public class TestWebHdfsTimeouts {
clientSocket = serverSocket.accept();
           // Immediately set up conditions for subsequent connections.
- URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT;
+ fs.connectionFactory = connectionFactory;
if (consumeConnectionBacklog) {
consumeConnectionBacklog();
}
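The WebHDFS timeout test no longer mutates the global URLUtils.SOCKET_TIMEOUT; it swaps a URLConnectionFactory instance onto the filesystem and restores the default in tearDown, which keeps parallel forks from fighting over static state. A minimal sketch of such a factory, assuming one socket-timeout value applied to both connect and read (the one-minute default is an assumption; the real class may differ):

    import java.io.IOException;
    import java.net.URL;
    import java.net.URLConnection;

    /**
     * Sketch of a connection factory with a per-instance socket timeout,
     * replacing a mutable static timeout with injectable state.
     */
    public class URLConnectionFactory {

      /** Shared default instance; the one-minute timeout is an assumption. */
      public static final URLConnectionFactory DEFAULT_CONNECTION_FACTORY =
          new URLConnectionFactory(60 * 1000);

      private final int socketTimeoutMs;

      public URLConnectionFactory(int socketTimeoutMs) {
        this.socketTimeoutMs = socketTimeoutMs;
      }

      /** Opens a connection with this factory's connect and read timeouts. */
      public URLConnection openConnection(URL url) throws IOException {
        URLConnection connection = url.openConnection();
        connection.setConnectTimeout(socketTimeoutMs);
        connection.setReadTimeout(socketTimeoutMs);
        return connection;
      }
    }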
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8dee23a467e..bc7e802a2db 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -713,7 +713,7 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
-        <version>2.12.3</version>
+        <version>2.16</version>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
@@ -827,7 +827,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
-          <forkMode>always</forkMode>
+          <reuseForks>false</reuseForks>
           <forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
           <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</argLine>
@@ -1002,23 +1002,5 @@
-
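A note on the two pom hunks above: surefire 2.16 deprecates forkMode, and reuseForks=false is its closest equivalent to the old forkMode=always, starting a fresh JVM for every test class rather than recycling forks, so static state leaked by one class cannot poison the next. The 900-second fork timeout and the heap-dump argLine carry over unchanged.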