diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 1fd47340cef..c2bccbe513a 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -705,9 +705,7 @@ <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> - <forkMode>perthread</forkMode> - <threadCount>${testsThreadCount}</threadCount> - <parallel>classes</parallel> + <forkCount>${testsThreadCount}</forkCount> + <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine> diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4e560ffc2b3..f69880bd262 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -29,6 +29,8 @@ Release 2.3.0 - UNRELEASED HDFS-5144. Document time unit to NameNodeMetrics. (Akira Ajisaka via suresh) + HDFS-4491. Parallel testing HDFS. (Andrey Klochkov via cnauroth) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index e9310399273..d0d13590938 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -614,5 +614,44 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <profile> + <id>parallel-tests</id> + <build> + <plugins> + <plugin> + <artifactId>maven-antrun-plugin</artifactId> + <executions> + <execution> + <id>create-parallel-tests-dirs</id> + <phase>test-compile</phase> + <configuration> + <target> + + + + + </target> + </configuration> + <goals> + <goal>run</goal> + </goals> + </execution> + </executions> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-plugin</artifactId> + <configuration> + <forkCount>${testsThreadCount}</forkCount> + <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine> + <systemPropertyVariables> + <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data> + <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir> + </systemPropertyVariables> + </configuration> + </plugin> + </plugins> + </build> + </profile> diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java index 9d9cde4dc02..dd5e9c6daa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher; -import org.apache.hadoop.hdfs.web.URLUtils; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; @@ -86,6 +86,8 @@ public class HftpFileSystem extends FileSystem HttpURLConnection.setFollowRedirects(true); } + URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + public static final Text TOKEN_KIND = new Text("HFTP delegation"); protected UserGroupInformation ugi; @@ -331,8 +333,8 @@ protected HttpURLConnection openConnection(String path, String query) throws IOException { query = addDelegationTokenParam(query); final URL url = getNamenodeURL(path, query); - final HttpURLConnection connection = - (HttpURLConnection)URLUtils.openConnection(url); + final HttpURLConnection connection; + connection = (HttpURLConnection)connectionFactory.openConnection(url); connection.setRequestMethod("GET"); connection.connect(); return connection; @@ -352,12 +354,14 @@ protected String addDelegationTokenParam(String query) throws IOException { } static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener { + URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + RangeHeaderUrlOpener(final URL url) { super(url); } protected 
HttpURLConnection openConnection() throws IOException { - return (HttpURLConnection)URLUtils.openConnection(url); + return (HttpURLConnection)connectionFactory.openConnection(url); } /** Use HTTP Range header for specifying offset. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java index 438d56e52f0..6a3bdba593b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java @@ -40,7 +40,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.web.URLUtils; import org.apache.hadoop.util.Time; /** @@ -154,7 +153,8 @@ protected HttpURLConnection openConnection(String path, String query) query = addDelegationTokenParam(query); final URL url = new URL("https", nnUri.getHost(), nnUri.getPort(), path + '?' + query); - HttpsURLConnection conn = (HttpsURLConnection)URLUtils.openConnection(url); + HttpsURLConnection conn; + conn = (HttpsURLConnection)connectionFactory.openConnection(url); // bypass hostname verification conn.setHostnameVerifier(new DummyHostnameVerifier()); conn.setRequestMethod("GET"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java similarity index 73% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java index 09feaf5bec4..54aab04e58c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java @@ -30,19 +30,27 @@ */ @InterfaceAudience.LimitedPrivate({"HDFS"}) @InterfaceStability.Unstable -public class URLUtils { +public class URLConnectionFactory { /** * Timeout for socket connects and reads */ - public static int SOCKET_TIMEOUT = 1*60*1000; // 1 minute + public final static int DEFAULT_SOCKET_TIMEOUT = 1*60*1000; // 1 minute + public static final URLConnectionFactory DEFAULT_CONNECTION_FACTORY = new URLConnectionFactory(DEFAULT_SOCKET_TIMEOUT); + + private int socketTimeout; + + public URLConnectionFactory(int socketTimeout) { + this.socketTimeout = socketTimeout; + } + /** * Opens a url with read and connect timeouts * @param url to open * @return URLConnection * @throws IOException */ - public static URLConnection openConnection(URL url) throws IOException { + public URLConnection openConnection(URL url) throws IOException { URLConnection connection = url.openConnection(); setTimeouts(connection); return connection; @@ -53,8 +61,8 @@ public static URLConnection openConnection(URL url) throws IOException { * * @param connection URLConnection to set */ - static void setTimeouts(URLConnection connection) { - connection.setConnectTimeout(SOCKET_TIMEOUT); - connection.setReadTimeout(SOCKET_TIMEOUT); + public void setTimeouts(URLConnection connection) { + connection.setConnectTimeout(socketTimeout); + connection.setReadTimeout(socketTimeout); } } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 2777e298dc1..f76c7683fd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -121,13 +121,15 @@ public class WebHdfsFileSystem extends FileSystem /** SPNEGO authenticator */ private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator(); + /** Default connection factory may be overridden in tests to use smaller timeout values */ + URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; /** Configures connections for AuthenticatedURL */ - private static final ConnectionConfigurator CONN_CONFIGURATOR = + private final ConnectionConfigurator CONN_CONFIGURATOR = new ConnectionConfigurator() { @Override public HttpURLConnection configure(HttpURLConnection conn) throws IOException { - URLUtils.setTimeouts(conn); + connectionFactory.setTimeouts(conn); return conn; } }; @@ -481,10 +483,9 @@ private HttpURLConnection openHttpUrlConnection(final URL url) final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token(); conn = new AuthenticatedURL(AUTH, CONN_CONFIGURATOR).openConnection( url, authToken); - URLUtils.setTimeouts(conn); } else { LOG.debug("open URL connection"); - conn = (HttpURLConnection)URLUtils.openConnection(url); + conn = (HttpURLConnection)connectionFactory.openConnection(url); } } catch (AuthenticationException e) { throw new IOException(e); @@ -579,7 +580,7 @@ HttpURLConnection twoStepWrite() throws IOException { checkRetry = false; //Step 2) Submit another Http request with the URL from the Location header with data. 
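Across HftpFileSystem, HsftpFileSystem, and WebHdfsFileSystem the patch swaps the static URLUtils helper for an instance-scoped URLConnectionFactory, which is what lets the timeout tests further down inject a tiny value. A minimal sketch of that injection, assuming only the factory API introduced in this patch (the helper method itself is illustrative, not part of the change):

```java
package org.apache.hadoop.hdfs; // same package as HftpFileSystem, so the
                                // package-private connectionFactory field is visible

import java.net.HttpURLConnection;

import org.apache.hadoop.hdfs.web.URLConnectionFactory;

public class TimeoutInjectionSketch {
  // Swap in an aggressive 5 ms timeout, exercise one request, restore the default.
  static void failFast(HftpFileSystem fs) throws Exception {
    fs.connectionFactory = new URLConnectionFactory(5);
    try {
      // openConnection() calls connect(), so an unresponsive endpoint surfaces
      // as a SocketTimeoutException after ~5 ms instead of the default minute.
      HttpURLConnection conn = fs.openConnection("/", "");
      conn.disconnect();
    } finally {
      fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
    }
  }
}
```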
- conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect)); + conn = (HttpURLConnection)connectionFactory.openConnection(new URL(redirect)); conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM); conn.setChunkedStreamingMode(32 << 10); //32kB-chunk connect(); @@ -602,7 +603,7 @@ void getResponse(boolean getJsonAndDisconnect) throws IOException { disconnect(); checkRetry = false; - conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect)); + conn = (HttpURLConnection)connectionFactory.openConnection(new URL(redirect)); connect(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java index 794c0571944..5ad39304da3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs; +import java.io.File; import java.io.IOException; import java.util.EnumSet; @@ -27,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.test.PathUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -56,8 +58,7 @@ public class TestFiRename { private static String addChild = ""; private static byte[] data = { 0 }; - private static String TEST_ROOT_DIR = - System.getProperty("test.build.data", "/tmp") + "/test"; + private static String TEST_ROOT_DIR = PathUtils.getTestDirName(TestFiRename.class); private static Configuration CONF = new Configuration(); static { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java index c48759e1f5e..cfd4a8d418c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs; +import java.io.File; import static org.junit.Assert.fail; import java.io.FileNotFoundException; @@ -36,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.Assert; @@ -48,6 +50,7 @@ * underlying file system as Hdfs. 
*/ public class TestResolveHdfsSymlink { + private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestResolveHdfsSymlink.class); private static MiniDFSCluster cluster = null; @BeforeClass @@ -80,12 +83,12 @@ public void testFcResolveAfs() throws IOException, InterruptedException { .getUri()); Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri() - .toString(), "/tmp/alpha"); + .toString(), new File(TEST_ROOT_DIR, "alpha").getAbsolutePath()); DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16, (short) 1, 2); Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri() - .toString(), "/tmp"); + .toString(), TEST_ROOT_DIR.getAbsolutePath()); Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(), "/tmp/link"); fcHdfs.createSymlink(linkTarget, hdfsLink, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java index d28736cffae..845eb6314ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -38,6 +39,8 @@ */ public class TestUrlStreamHandler { + private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestUrlStreamHandler.class); + /** * Test opening and reading from an InputStream through a hdfs:// URL. *

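Most of the test changes that follow lean on a PathUtils helper from the hadoop-common test utilities. Its contract, as used throughout this patch, is simply "one scratch directory per test class under test.build.data"; a reduced sketch of that contract (illustrative, the real implementation may differ):

```java
package org.apache.hadoop.test;

import java.io.File;

// Reduced sketch of the PathUtils contract relied on below: every test class
// gets its own directory under test.build.data, so classes running in parallel
// surefire forks never collide on scratch files. (Not the real code.)
public final class PathUtilsSketch {
  private PathUtilsSketch() {}

  public static File getTestDir(Class<?> caller) {
    File dir = new File(System.getProperty("test.build.data", "target/test/data"),
        caller.getSimpleName());
    dir.mkdirs(); // created eagerly; callers treat it as existing
    return dir;
  }

  public static String getTestDirName(Class<?> caller) {
    return getTestDir(caller).getAbsolutePath();
  }
}
```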
@@ -111,13 +114,12 @@ public void testFileUrls() throws IOException, URISyntaxException { Configuration conf = new HdfsConfiguration(); // Locate the test temporary directory. - File tmpDir = new File(conf.get("hadoop.tmp.dir")); - if (!tmpDir.exists()) { - if (!tmpDir.mkdirs()) - throw new IOException("Cannot create temporary directory: " + tmpDir); + if (!TEST_ROOT_DIR.exists()) { + if (!TEST_ROOT_DIR.mkdirs()) + throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR); } - File tmpFile = new File(tmpDir, "thefile"); + File tmpFile = new File(TEST_ROOT_DIR, "thefile"); URI uri = tmpFile.toURI(); FileSystem fs = FileSystem.get(uri, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java index e9e14ce8b11..6c7bac31ea8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -39,8 +40,7 @@ public class TestLoadGenerator extends Configured implements Tool { private static final Configuration CONF = new HdfsConfiguration(); private static final int DEFAULT_BLOCK_SIZE = 10; - private static final String OUT_DIR = - System.getProperty("test.build.data","build/test/data"); + private static final File OUT_DIR = PathUtils.getTestDir(TestLoadGenerator.class); private static final File DIR_STRUCTURE_FILE = new File(OUT_DIR, StructureGenerator.DIR_STRUCTURE_FILE_NAME); private static final File FILE_STRUCTURE_FILE = @@ -65,7 +65,7 @@ public void testStructureGenerator() throws Exception { StructureGenerator sg = new StructureGenerator(); String[] args = new String[]{"-maxDepth", "2", "-minWidth", "1", "-maxWidth", "2", "-numOfFiles", "2", - "-avgFileSize", "1", "-outDir", OUT_DIR, "-seed", "1"}; + "-avgFileSize", "1", "-outDir", OUT_DIR.getAbsolutePath(), "-seed", "1"}; final int MAX_DEPTH = 1; final int MIN_WIDTH = 3; @@ -133,8 +133,7 @@ public void testStructureGenerator() throws Exception { public void testLoadGenerator() throws Exception { final String TEST_SPACE_ROOT = "/test"; - final String SCRIPT_TEST_DIR = new File(System.getProperty("test.build.data", - "/tmp")).getAbsolutePath(); + final String SCRIPT_TEST_DIR = OUT_DIR.getAbsolutePath(); String script = SCRIPT_TEST_DIR + "/" + "loadgenscript"; String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2"; File scriptFile1 = new File(script); @@ -156,7 +155,7 @@ public void testLoadGenerator() throws Exception { try { DataGenerator dg = new DataGenerator(); dg.setConf(CONF); - String [] args = new String[] {"-inDir", OUT_DIR, "-root", TEST_SPACE_ROOT}; + String [] args = new String[] {"-inDir", OUT_DIR.getAbsolutePath(), "-root", TEST_SPACE_ROOT}; assertEquals(0, dg.run(args)); final int READ_PROBABILITY = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index a611ff9e98e..d4314026644 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -1405,6 +1405,13 @@ public int getNameNodeServicePort(int nnIndex) { * Shutdown all the nodes in the cluster. */ public void shutdown() { + shutdown(false); + } + + /** + * Shutdown all the nodes in the cluster. + */ + public void shutdown(boolean deleteDfsDir) { LOG.info("Shutting down the Mini HDFS Cluster"); if (checkExitOnShutdown) { if (ExitUtil.terminateCalled()) { @@ -1424,6 +1431,11 @@ public void shutdown() { nameNode = null; } } + if (deleteDfsDir) { + base_dir.delete(); + } else { + base_dir.deleteOnExit(); + } } /** @@ -2116,7 +2128,7 @@ public File getInstanceStorageDir(int dnIndex, int dirIndex) { *

<ol> * <li><base directory>/data/data<2*dnIndex + 1></li> * <li><base directory>/data/data<2*dnIndex + 2></li> * </ol> - * + * * @param dnIndex datanode index (starts from 0) * @param dirIndex directory index (0 or 1). Index 0 provides access to the * first storage directory. Index 1 provides access to the second @@ -2147,7 +2159,7 @@ private static String getStorageDirPath(int dnIndex, int dirIndex) { public static String getDNCurrentDir(File storageDir) { return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/"; } - + /** * Get directory corresponding to block pool directory in the datanode * @param storageDir the storage directory of a datanode. @@ -2253,7 +2265,7 @@ public static File getBlockFile(int dnIndex, ExtendedBlock block) { } return null; } - + /** * Get the block metadata file for a block from a given datanode * @@ -2341,14 +2353,17 @@ protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile, } else { if (checkDataNodeAddrConfig) { conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); } else { conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); } } + if (checkDataNodeAddrConfig) { + conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); + } else { + conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); + } } private void addToFile(String p, String address) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java index 51fab6653f3..74c763d5aa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java @@ -71,10 +71,6 @@ public class TestClientReportBadBlock { @Before public void startUpCluster() throws IOException { - if (System.getProperty("test.build.data") == null) { // to allow test to be - // run outside of Ant - System.setProperty("test.build.data", "build/test/data"); - } // disable block scanner conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index e54e2777c44..fbcce3946ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -20,9 +20,6 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; import static org.junit.Assert.*; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index bd749442894..50580f4957d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -44,6 +44,7 @@ import org.apache.hadoop.io.compress.BZip2Codec; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; @@ -61,9 +62,7 @@ public class TestDFSShell { private static final Log LOG = LogFactory.getLog(TestDFSShell.class); private static AtomicInteger counter = new AtomicInteger(); - static final String TEST_ROOT_DIR = - new Path(System.getProperty("test.build.data","/tmp")) - .toString().replace(' ', '+'); + static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class); static Path writeFile(FileSystem fs, Path f) throws IOException { DataOutputStream out = fs.create(f); @@ -482,12 +481,11 @@ public void testURIPaths() throws Exception { Configuration dstConf = new HdfsConfiguration(); MiniDFSCluster srcCluster = null; MiniDFSCluster dstCluster = null; - String bak = System.getProperty("test.build.data"); + File bak = new File(PathUtils.getTestDir(getClass()), "dfs_tmp_uri"); + bak.mkdirs(); try{ srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build(); - File nameDir = new File(new File(bak), "dfs_tmp_uri/"); - nameDir.mkdirs(); - System.setProperty("test.build.data", nameDir.toString()); + dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath()); dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build(); FileSystem srcFs = srcCluster.getFileSystem(); FileSystem dstFs = dstCluster.getFileSystem(); @@ -559,7 +557,6 @@ public void testURIPaths() throws Exception { ret = ToolRunner.run(shell, argv); assertEquals("default works for rm/rmr", 0, ret); } finally { - System.setProperty("test.build.data", bak); if (null != srcCluster) { srcCluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index f6388fb3213..d241033aed8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.HostFileManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.test.PathUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -76,7 +77,7 @@ public void setup() throws IOException { // Set up the hosts/exclude files. 
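The testURIPaths rewrite above is the recurring cure for a global-state problem: previously the test repointed the JVM-wide test.build.data property to relocate the second cluster, which races under parallel forks. The HDFS_MINIDFS_BASEDIR key scopes the location to one cluster instead. A condensed sketch of the pattern (names and sizes illustrative):

```java
import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.PathUtils;

public class TwoClustersSketch {
  static void runTwoClusters() throws Exception {
    Configuration srcConf = new HdfsConfiguration();
    Configuration dstConf = new HdfsConfiguration();
    // Give the second cluster a private base directory instead of mutating
    // the process-wide test.build.data system property.
    File dstBase = new File(PathUtils.getTestDir(TwoClustersSketch.class), "dst");
    dstBase.mkdirs();
    dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dstBase.getAbsolutePath());

    MiniDFSCluster src = new MiniDFSCluster.Builder(srcConf).numDataNodes(1).build();
    MiniDFSCluster dst = new MiniDFSCluster.Builder(dstConf).numDataNodes(1).build();
    try {
      // ... exercise src.getFileSystem() and dst.getFileSystem() ...
    } finally {
      dst.shutdown();
      src.shutdown();
    }
  }
}
```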
localFileSys = FileSystem.getLocal(conf); Path workingDir = localFileSys.getWorkingDirectory(); - Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/work-dir/decommission"); + Path dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission"); hostsFile = new Path(dir, "hosts"); excludeFile = new Path(dir, "exclude"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index f0be91a18a6..b8bfd7efaef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -23,6 +23,8 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + +import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; @@ -70,6 +72,9 @@ private HdfsConfiguration getTestConfiguration() { HdfsConfiguration conf; if (noXmlDefaults) { conf = new HdfsConfiguration(false); + String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath(); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir); + conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir); } else { conf = new HdfsConfiguration(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java index 2f2c9a4fd6e..5fc567a2131 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -260,7 +261,7 @@ private void testFileCorruption(LocalFileSystem fileSys) throws IOException { // create a file and verify that checksum corruption results in // a checksum exception on LocalFS - String dir = System.getProperty("test.build.data", "."); + String dir = PathUtils.getTestDirName(getClass()); Path file = new Path(dir + "/corruption-test.dat"); Path crcFile = new Path(dir + "/.corruption-test.dat.crc"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java index d086c77a9bf..570b19f8d2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.util.Holder; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -145,7 +146,7 @@ public void testLoadLogsFromBuggyEarlierVersions() throws IOException { String tarFile = System.getProperty("test.cache.data", "build/test/cache") + "/" + HADOOP_23_BROKEN_APPEND_TGZ; - String testDir = System.getProperty("test.build.data", "build/test/data"); + String testDir = 
PathUtils.getTestDirName(getClass()); File dfsDir = new File(testDir, "image-with-buggy-append"); if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) { throw new IOException("Could not delete dfs directory '" + dfsDir + "'"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index 458880af566..6531fe7c050 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.test.PathUtils; import org.apache.log4j.Level; import org.junit.Test; @@ -95,7 +96,7 @@ public void testFileCorruption() throws Exception { @Test public void testLocalFileCorruption() throws Exception { Configuration conf = new HdfsConfiguration(); - Path file = new Path(System.getProperty("test.build.data"), "corruptFile"); + Path file = new Path(PathUtils.getTestDirName(getClass()), "corruptFile"); FileSystem fs = FileSystem.getLocal(conf); DataOutputStream dos = fs.create(file); dos.writeBytes("original bytes"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java index 036252ddc31..59d1615025d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.DNS; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -53,6 +54,9 @@ public class TestHDFSServerPorts { // reset default 0.0.0.0 addresses in order to avoid IPv6 problem static final String THIS_HOST = getFullHostName() + ":0"; + + private static final File TEST_DATA_DIR = PathUtils.getTestDir(TestHDFSServerPorts.class); + static { DefaultMetricsSystem.setMiniClusterMode(true); } @@ -81,13 +85,6 @@ public static String getFullHostName() { } } - /** - * Get base directory these tests should run in. - */ - private String getTestingDir() { - return System.getProperty("test.build.data", "build/test/data"); - } - public NameNode startNameNode() throws IOException { return startNameNode(false); } @@ -95,8 +92,7 @@ public NameNode startNameNode() throws IOException { * Start the namenode. 
*/ public NameNode startNameNode(boolean withService) throws IOException { - String dataDir = getTestingDir(); - hdfsDir = new File(dataDir, "dfs"); + hdfsDir = new File(TEST_DATA_DIR, "dfs"); if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'"); } @@ -119,9 +115,8 @@ public NameNode startNameNode(boolean withService) throws IOException { * Start the BackupNode */ public BackupNode startBackupNode(Configuration conf) throws IOException { - String dataDir = getTestingDir(); // Set up testing environment directories - hdfsDir = new File(dataDir, "backupNode"); + hdfsDir = new File(TEST_DATA_DIR, "backupNode"); if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'"); } @@ -150,8 +145,7 @@ public BackupNode startBackupNode(Configuration conf) throws IOException { */ public DataNode startDataNode(int index, Configuration config) throws IOException { - String dataDir = getTestingDir(); - File dataNodeDir = new File(dataDir, "data-" + index); + File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index); config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath()); String[] args = new String[] {}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java index d9a22c10111..56bd21ef2ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java @@ -33,16 +33,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdfs.web.URLUtils; -import org.junit.BeforeClass; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.junit.Test; public class TestHftpURLTimeouts { - @BeforeClass - public static void setup() { - URLUtils.SOCKET_TIMEOUT = 5; - } - + @Test public void testHftpSocketTimeout() throws Exception { Configuration conf = new Configuration(); @@ -51,9 +46,11 @@ public void testHftpSocketTimeout() throws Exception { InetAddress.getByName(null).getHostAddress(), socket.getLocalPort(), null, null, null); - boolean timedout = false; HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf); + fs.connectionFactory = new URLConnectionFactory(5); + + boolean timedout = false; try { HttpURLConnection conn = fs.openConnection("/", ""); timedout = false; @@ -69,6 +66,7 @@ public void testHftpSocketTimeout() throws Exception { assertTrue("read timedout", timedout); assertTrue("connect timedout", checkConnectTimeout(fs, false)); } finally { + fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; fs.close(); } } @@ -84,6 +82,8 @@ public void testHsftpSocketTimeout() throws Exception { boolean timedout = false; HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf); + fs.connectionFactory = new URLConnectionFactory(5); + try { HttpURLConnection conn = null; timedout = false; @@ -100,6 +100,7 @@ public void testHsftpSocketTimeout() throws Exception { assertTrue("ssl read connect timedout", timedout); assertTrue("connect timedout", checkConnectTimeout(fs, true)); } finally { + fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; fs.close(); } } @@ -121,7 +122,7 @@ private boolean checkConnectTimeout(HftpFileSystem fs, boolean 
ignoreReadTimeout // https will get a read timeout due to SSL negotiation, but // a normal http will not, so need to ignore SSL read timeouts // until a connect timeout occurs - if (!(ignoreReadTimeout && message.equals("Read timed out"))) { + if (!(ignoreReadTimeout && "Read timed out".equals(message))) { timedout = true; assertEquals("connect timed out", message); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java index 1400f07e062..84678da2d40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java @@ -25,8 +25,8 @@ import java.io.File; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.junit.After; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.test.PathUtils; import org.junit.Before; import org.junit.Test; @@ -43,20 +43,10 @@ public class TestMiniDFSCluster { private static final String CLUSTER_3 = "cluster3"; private static final String CLUSTER_4 = "cluster4"; private static final String CLUSTER_5 = "cluster5"; - protected String testDataPath; - protected File testDataDir; + protected File testDataPath; @Before public void setUp() { - testDataPath = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, - "build/test/data"); - testDataDir = new File(new File(testDataPath).getParentFile(), - "miniclusters"); - - - } - @After - public void tearDown() { - System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testDataPath); + testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters"); } /** @@ -120,7 +110,7 @@ public void testIsClusterUpAfterShutdown() throws Throwable { MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build(); try { DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem(); - dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER); + dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); cluster4.shutdown(); } finally { while(cluster4.isClusterUp()){ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java index 497d29d7380..a90da870d0d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.log4j.Level; import org.junit.Test; @@ -304,7 +305,7 @@ public void testEarlierVersionEditLog() throws Exception { String tarFile = System.getProperty("test.cache.data", "build/test/cache") + "/" + HADOOP_1_0_MULTIBLOCK_TGZ; - String testDir = System.getProperty("test.build.data", "build/test/data"); + String testDir = PathUtils.getTestDirName(getClass()); File dfsDir = new File(testDir, "image-1.0"); if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) { throw new IOException("Could not delete dfs directory '" + dfsDir + "'"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java index cfe463c4356..a7eed468e62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java @@ -174,6 +174,7 @@ public void testNewNamenodeTakesOverWriter() throws Exception { public void testMismatchedNNIsRejected() throws Exception { conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image"); + String defaultEditsDir = conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc.getQuorumJournalURI("myjournal").toString()); @@ -187,7 +188,7 @@ public void testMismatchedNNIsRejected() throws Exception { // Reformat just the on-disk portion Configuration onDiskOnly = new Configuration(conf); - onDiskOnly.unset(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY); + onDiskOnly.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, defaultEditsDir); NameNode.format(onDiskOnly); // Start the NN - should fail because the JNs are still formatted diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java index e6e140443bf..79ca6ca72bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java @@ -46,6 +46,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Shell; import org.junit.After; import org.junit.Before; @@ -61,13 +62,13 @@ public class TestJournalNode { private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo( 12345, "mycluster", "my-bp", 0L); + private static File TEST_BUILD_DATA = PathUtils.getTestDir(TestJournalNode.class); + private JournalNode jn; private Journal journal; private Configuration conf = new Configuration(); private IPCLoggerChannel ch; private String journalId; - private File TEST_BUILD_DATA = - new File(System.getProperty("test.build.data", "build/test/data")); static { // Avoid an error when we double-initialize JvmMetrics diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java index 79785961c91..972a785f5d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.io.File; -import java.io.IOException; import java.util.Collection; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index b79c9f88cac..a77507ec7a5 
100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -84,8 +85,7 @@ public static void setupCluster() throws Exception { FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - File baseDir = new File(System.getProperty( - "test.build.data", "build/test/data"), "dfs/"); + File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java index 4731927ce94..54c0c80b58f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.File; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -38,6 +37,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; +import org.apache.hadoop.test.PathUtils; +import org.junit.After; +import org.junit.Before; import org.junit.Test; public class TestReplicationPolicyWithNodeGroup extends TestCase { @@ -45,10 +47,10 @@ public class TestReplicationPolicyWithNodeGroup extends TestCase { private static final int NUM_OF_DATANODES = 8; private static final int NUM_OF_DATANODES_BOUNDARY = 6; private static final int NUM_OF_DATANODES_MORE_TARGETS = 12; - private static final Configuration CONF = new HdfsConfiguration(); - private static final NetworkTopology cluster; - private static final NameNode namenode; - private static final BlockPlacementPolicy replicator; + private final Configuration CONF = new HdfsConfiguration(); + private NetworkTopology cluster; + private NameNode namenode; + private BlockPlacementPolicy replicator; private static final String filename = "/dummyfile.txt"; private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] { @@ -91,27 +93,23 @@ public class TestReplicationPolicyWithNodeGroup extends TestCase { private final static DatanodeDescriptor NODE = new DatanodeDescriptor(DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d2/r4/n7")); - static { - try { - FileSystem.setDefaultUri(CONF, "hdfs://localhost:0"); - CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - // Set properties to make HDFS aware of NodeGroup. 
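The conversion continuing below is the patch's other recurring move: tearing a one-NameNode-per-JVM static initializer apart into @Before/@After fixtures so each test gets a fresh, stoppable instance rooted in a per-class directory. The shape of the change, reduced to a self-contained sketch (FakeNameNode is a stand-in, not a Hadoop class):

```java
import java.io.File;

import org.junit.After;
import org.junit.Before;

public class PerTestFixtureSketch {
  // Stand-in for the real NameNode so the sketch compiles on its own.
  static class FakeNameNode {
    FakeNameNode(File storageDir) {}
    void stop() {}
  }

  private FakeNameNode namenode; // was: static, shared by every test in the JVM

  @Before
  public void setUp() {
    File baseDir = new File(System.getProperty("test.build.data", "target/test/data"),
        getClass().getSimpleName());
    baseDir.mkdirs();
    namenode = new FakeNameNode(baseDir); // fresh instance per test
  }

  @After
  public void tearDown() {
    namenode.stop(); // release ports and storage before the next test starts
  }
}
```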
- CONF.set("dfs.block.replicator.classname", - "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup"); - CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, - "org.apache.hadoop.net.NetworkTopologyWithNodeGroup"); - - File baseDir = new File(System.getProperty( - "test.build.data", "build/test/data"), "dfs/"); - CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, - new File(baseDir, "name").getPath()); - - DFSTestUtil.formatNameNode(CONF); - namenode = new NameNode(CONF); - } catch (IOException e) { - e.printStackTrace(); - throw (RuntimeException)new RuntimeException().initCause(e); - } + @Before + public void setUp() throws Exception { + FileSystem.setDefaultUri(CONF, "hdfs://localhost:0"); + CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + // Set properties to make HDFS aware of NodeGroup. + CONF.set("dfs.block.replicator.classname", + "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup"); + CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, + "org.apache.hadoop.net.NetworkTopologyWithNodeGroup"); + + File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class); + + CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + new File(baseDir, "name").getPath()); + + DFSTestUtil.formatNameNode(CONF); + namenode = new NameNode(CONF); final BlockManager bm = namenode.getNamesystem().getBlockManager(); replicator = bm.getBlockPlacementPolicy(); cluster = bm.getDatanodeManager().getNetworkTopology(); @@ -122,6 +120,11 @@ public class TestReplicationPolicyWithNodeGroup extends TestCase { setupDataNodeCapacity(); } + @After + public void tearDown() throws Exception { + namenode.stop(); + } + private static void setupDataNodeCapacity() { for(int i=0; i fsImageDirs = new ArrayList(); ArrayList editsDirs = new ArrayList(); File filePath = - new File(System.getProperty("test.build.data","/tmp"), "storageDirToCheck"); + new File(PathUtils.getTestDir(getClass()), "storageDirToCheck"); assertTrue("Couldn't create directory storageDirToCheck", filePath.exists() || filePath.mkdirs()); fsImageDirs.add(filePath.toURI()); @@ -1914,9 +1915,11 @@ public void testReformatNNBetweenCheckpoints() throws IOException { } // Start a new NN with the same host/port. 
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) - .nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true) - .build(); + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .nameNodePort(origPort) + .nameNodeHttpPort(origHttpPort) + .format(true).build(); try { secondary.doCheckpoint(); @@ -2138,7 +2141,8 @@ public void testCheckpointTriggerOnTxnCount() throws Exception { conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1); try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) .format(true).build(); FileSystem fs = cluster.getFileSystem(); secondary = startSecondaryNameNode(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java index 4330317d6ff..7c23dd55df5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ExitUtil.ExitException; import org.junit.After; @@ -72,7 +73,7 @@ private String getClusterId(Configuration config) throws IOException { public void setUp() throws IOException { ExitUtil.disableSystemExit(); - String baseDir = System.getProperty("test.build.data", "build/test/data"); + String baseDir = PathUtils.getTestDirName(getClass()); hdfsDir = new File(baseDir, "dfs/name"); if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 98e980d69e0..2ce9bb54609 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; @@ -96,9 +97,8 @@ public class TestEditLog { static final int NUM_TRANSACTIONS = 100; static final int NUM_THREADS = 100; - static final File TEST_DIR = new File( - System.getProperty("test.build.data","build/test/data")); - + static final File TEST_DIR = PathUtils.getTestDir(TestEditLog.class); + /** An edits log with 3 edits from 0.20 - the result of * a fresh namesystem followed by hadoop fs -touchz /myfile */ static final byte[] HADOOP20_SOME_EDITS = @@ -569,6 +569,7 @@ public void testEditChecksum() throws Exception { fail("should not be able to start"); } catch (IOException e) { // expected + assertNotNull("Cause of exception should be ChecksumException", e.getCause()); assertEquals("Cause of exception should be ChecksumException", ChecksumException.class, 
e.getCause().getClass()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java index 4f737ad076d..2be1dc62377 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java @@ -24,6 +24,7 @@ import java.io.File; import java.io.IOException; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.junit.After; @@ -34,8 +35,7 @@ * Test the EditLogFileOutputStream */ public class TestEditLogFileOutputStream { - private final static File TEST_DIR = - new File(System.getProperty("test.build.data", "/tmp")); + private final static File TEST_DIR = PathUtils.getTestDir(TestEditLogFileOutputStream.class); private static final File TEST_EDITS = new File(TEST_DIR, "testEditLogFileOutput.log"); final static int MIN_PREALLOCATION_LENGTH = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java index 486b17c3925..610a4a2fd4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil.ExitException; import org.junit.After; import org.junit.Before; @@ -192,8 +193,7 @@ public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush() Configuration conf = new HdfsConfiguration(); String[] nameDirs = new String[4]; for (int i = 0; i < nameDirs.length; i++) { - File nameDir = new File(System.getProperty("test.build.data"), - "name-dir" + i); + File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i); nameDir.mkdirs(); nameDirs[i] = nameDir.getAbsolutePath(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index dd637a9212d..4db7e6a7dfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.log4j.Level; import org.junit.Test; @@ -60,8 +61,7 @@ public class TestFSEditLogLoader { ((Log4JLogger)FSEditLogLoader.LOG).getLogger().setLevel(Level.ALL); } - private static final File TEST_DIR = new File( - System.getProperty("test.build.data","build/test/data")); + 
private static final File TEST_DIR = PathUtils.getTestDir(TestFSEditLogLoader.class);
 
   private static final int NUM_DATA_NODES = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 754e56966d3..9aaeb74a1c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -91,7 +91,7 @@
  */
 public class TestFsck {
   static final String auditLogFile = System.getProperty("test.build.dir",
-      "build/test") + "/audit.log";
+      "build/test") + "/TestFsck-audit.log";
 
   // Pattern for:
   // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
@@ -159,7 +159,8 @@ public void testFsck() throws Exception {
       cluster.shutdown();
 
       // restart the cluster; bring up namenode but not the data nodes
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0).format(false).build();
       outStr = runFsck(conf, 1, true, "/");
       // expect the result is corrupt
       assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
index 659a4d3477b..0ca2f849c54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
@@ -62,7 +63,7 @@ public class TestNameEditsConfigs {
   short replication = 3;
   private File base_dir = new File(
-      System.getProperty("test.build.data", "build/test/data"), "dfs/");
+      PathUtils.getTestDir(TestNameEditsConfigs.class), "dfs");
 
   @Before
   public void setUp() throws IOException {
@@ -70,7 +71,7 @@ public void setUp() throws IOException {
       throw new IOException("Cannot remove directory " + base_dir);
     }
   }
-
+
   private void writeFile(FileSystem fileSys, Path name, int repl)
       throws IOException {
     FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 8463eb963ec..7d6827f9809 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
@@ -57,8 +58,7 @@ public class TestNameNodeRecovery {
   private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
   private static StartupOption recoverStartOpt = StartupOption.RECOVER;
-  private static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = PathUtils.getTestDir(TestNameNodeRecovery.class);
 
   static {
     recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
index e73d71aff70..2012b6aabe1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
@@ -33,12 +33,14 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestNameNodeResourceChecker {
+  private final static File BASE_DIR = PathUtils.getTestDir(TestNameNodeResourceChecker.class);
   private Configuration conf;
   private File baseDir;
   private File nameDir;
@@ -46,8 +48,7 @@ public class TestNameNodeResourceChecker {
   @Before
   public void setUp () throws IOException {
     conf = new Configuration();
-    baseDir = new File(System.getProperty("test.build.data"));
-    nameDir = new File(baseDir, "resource-check-name-dir");
+    nameDir = new File(BASE_DIR, "resource-check-name-dir");
     nameDir.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
   }
@@ -141,8 +142,8 @@ public void testCheckThatNameNodeResourceMonitorIsRunning()
   @Test
   public void testChecking2NameDirsOnOneVolume() throws IOException {
     Configuration conf = new Configuration();
-    File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
-    File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+    File nameDir1 = new File(BASE_DIR, "name-dir1");
+    File nameDir2 = new File(BASE_DIR, "name-dir2");
     nameDir1.mkdirs();
     nameDir2.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
@@ -162,7 +163,7 @@ public void testChecking2NameDirsOnOneVolume() throws IOException {
   @Test
   public void testCheckingExtraVolumes() throws IOException {
     Configuration conf = new Configuration();
-    File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
+    File nameDir = new File(BASE_DIR, "name-dir");
     nameDir.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
@@ -182,8 +183,8 @@ public void testCheckingExtraVolumes() throws IOException {
   @Test
   public void testLowResourceVolumePolicy() throws IOException, URISyntaxException {
     Configuration conf = new Configuration();
-    File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
-    File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+    File nameDir1 = new File(BASE_DIR, "name-dir1");
+    File nameDir2 = new File(BASE_DIR, "name-dir2");
     nameDir1.mkdirs();
     nameDir2.mkdirs();
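The hunks above all apply one pattern: suites that used to write into the shared test.build.data root now work under a directory named after the test class, so surefire forks running different suites can no longer delete or overwrite each other's files. A minimal sketch of the before/after (illustration only, not a hunk from this patch; the PathUtils helper itself is added near the end of the diff):

```java
import java.io.File;

public class TestDirIsolationSketch {
  public static void main(String[] args) {
    // Before: every suite resolves to the same root, so two concurrently
    // running test JVMs race on the same paths.
    File shared = new File(System.getProperty("test.build.data", "build/test/data"));

    // After: each suite gets <root>/<SimpleClassName>, matching what
    // PathUtils.getTestDir(Class) does at the end of this patch.
    File isolated = new File(shared, TestDirIsolationSketch.class.getSimpleName());
    System.out.println("isolated dir: " + isolated + ", created: " + isolated.mkdirs());
  }
}
```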
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 5b384578351..625e22ce282 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -59,6 +59,7 @@
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
 import org.junit.After;
@@ -419,8 +420,7 @@ public void testCompression() throws IOException {
     Configuration conf = new Configuration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-    File base_dir = new File(System.getProperty(
-        "test.build.data", "build/test/data"), "dfs/");
+    File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         new File(base_dir, "name").getPath());
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
index 905e3680e0b..1a612e83ec7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
@@ -391,7 +391,8 @@ public void testStorageRestoreFailure() throws Exception {
         (new File(path3, "current").getAbsolutePath()) : path3.toString();
 
     try {
-      cluster = new MiniDFSCluster.Builder(config).numDataNodes(0)
+      cluster = new MiniDFSCluster.Builder(config)
+          .numDataNodes(0)
           .manageNameDfsDirs(false).build();
       cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
index cba634fee92..5a178d19440 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -48,8 +49,7 @@
 public class TestTransferFsImage {
 
-  private static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = PathUtils.getTestDir(TestTransferFsImage.class);
 
   /**
    * Regression test for HDFS-1997. Test that, if an exception
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index 678e03866d5..502c9de4096 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -56,8 +56,8 @@ public void setupCluster() throws IOException {
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));
 
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
index ce005b10a14..18972655765 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
@@ -70,13 +70,13 @@ public void setup() throws Exception {
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
 
-    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10003);
-    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10004);
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10023);
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10024);
 
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
         .numDataNodes(0)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index 8675fa3fc6f..8c61c9237e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -124,8 +124,8 @@ private static void testStandbyTriggersLogRolls(int activeIndex)
     // Have to specify IPC ports so the NNs can talk to each other.
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 37c0b16fabd..5ec7f7e1c1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -76,8 +76,8 @@ public void setUpCluster() throws Exception {
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10041))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10042)));
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)
       .numDataNodes(0)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
index 49d89592b8a..4f848dcf834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
@@ -52,8 +52,8 @@ public void testHaFsck() throws Exception {
     // need some HTTP ports
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ha-nn-uri-0")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10051))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10052)));
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index dff28740690..3ff5d54dc66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -89,8 +89,8 @@ public void setupCluster() throws Exception {
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
 
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)
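These HA suites collide on a different shared resource: fixed NameNode HTTP/IPC and ZKFC ports. Ephemeral ports are not a drop-in fix here; as the TestEditLogTailer comment notes, each NN must be told its peer's ports up front, before anything is bound. The patch therefore parcels out a distinct block of ports to each suite (20001-20002, 10021-10024, 10031-10032, and so on). The failure mode this avoids, as a self-contained sketch (hypothetical code, not from the patch):

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class PortClashSketch {
  public static void main(String[] args) throws IOException {
    // Two test suites that both hard-code port 10001: whichever surefire
    // fork binds second dies with java.net.BindException.
    ServerSocket suiteA = new ServerSocket();
    suiteA.bind(new InetSocketAddress("127.0.0.1", 10001));
    ServerSocket suiteB = new ServerSocket();
    suiteB.bind(new InetSocketAddress("127.0.0.1", 10001)); // throws BindException
  }
}
```

Classes inside one fork still run sequentially, so only cross-suite clashes need to be ruled out, and a small per-suite range is enough.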
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 1d189a108de..e6c9a3f3967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
@@ -53,7 +54,7 @@ public class TestOfflineEditsViewer {
   }
 
   private static String buildDir =
-    System.getProperty("test.build.data", "build/test/data");
+    PathUtils.getTestDirName(TestOfflineEditsViewer.class);
 
   private static String cacheDir =
     System.getProperty("test.cache.data", "build/test/cache");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
index a5501d97547..c7d3b31dacf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
@@ -27,6 +27,7 @@
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 /**
@@ -34,7 +35,7 @@
  * on predetermined inputs
  */
 public class TestDelimitedImageVisitor {
-  private static String ROOT = System.getProperty("test.build.data","/tmp");
+  private static String ROOT = PathUtils.getTestDirName(TestDelimitedImageVisitor.class);
   private static final String delim = "--";
 
   // Record an element in the visitor and build the expected line in the output
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 50e816417d3..11aa3b821f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -88,8 +89,7 @@ private static class LsElements {
   final static HashMap<String, FileStatus> writtenFiles = new HashMap<String, FileStatus>();
 
-  private static String ROOT = System.getProperty("test.build.data",
-      "build/test/data");
+  private static String ROOT = PathUtils.getTestDirName(TestOfflineImageViewer.class);
 
   // Create a populated namespace for later testing.  Save its contents to a
   // data structure and store its fsimage location.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
index ebbb4e22701..2a9465aa90c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
@@ -40,10 +41,7 @@ public class TestAtomicFileOutputStream {
   private static final String TEST_STRING = "hello world";
   private static final String TEST_STRING_2 = "goodbye world";
 
-  private static File BASE_DIR = new File(
-      System.getProperty("test.build.data", "build/test/data"));
-  private static File TEST_DIR = new File(BASE_DIR,
-      TestAtomicFileOutputStream.class.getName());
+  private static File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);
 
   private static File DST_FILE = new File(TEST_DIR, "test.txt");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
index 6f5b1613360..35fa46d20cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
@@ -29,14 +29,12 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestMD5FileUtils {
-  private static final File TEST_DIR_ROOT = new File(
-      System.getProperty("test.build.data","build/test/data"));
-  private static final File TEST_DIR = new File(TEST_DIR_ROOT,
-      "TestMD5FileUtils");
+  private static final File TEST_DIR = PathUtils.getTestDir(TestMD5FileUtils.class);
   private static final File TEST_FILE = new File(TEST_DIR, "testMd5File.dat");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 2071f6feb87..7a007a05928 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -58,7 +58,6 @@ public class TestWebHdfsTimeouts {
   private static final int CLIENTS_TO_CONSUME_BACKLOG = 100;
   private static final int CONNECTION_BACKLOG = 1;
-  private static final int INITIAL_SOCKET_TIMEOUT = URLUtils.SOCKET_TIMEOUT;
   private static final int SHORT_SOCKET_TIMEOUT = 5;
   private static final int TEST_TIMEOUT = 10000;
@@ -67,20 +66,22 @@ public class TestWebHdfsTimeouts {
   private InetSocketAddress nnHttpAddress;
   private ServerSocket serverSocket;
   private Thread serverThread;
+  private URLConnectionFactory connectionFactory = new URLConnectionFactory(SHORT_SOCKET_TIMEOUT);
 
   @Before
   public void setUp() throws Exception {
-    URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT;
     Configuration conf = WebHdfsTestUtil.createConf();
     nnHttpAddress = NameNode.getHttpAddress(conf);
     serverSocket = new ServerSocket(nnHttpAddress.getPort(), CONNECTION_BACKLOG);
     fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    fs.connectionFactory = connectionFactory;
    clients = new ArrayList<SocketChannel>();
     serverThread = null;
   }
 
   @After
   public void tearDown() throws Exception {
+    fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
     IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
     IOUtils.cleanup(LOG, fs);
     if (serverSocket != null) {
@@ -240,7 +241,7 @@ public void testTwoStepWriteReadTimeout() throws Exception {
    */
   private void startSingleTemporaryRedirectResponseThread(
       final boolean consumeConnectionBacklog) {
-    URLUtils.SOCKET_TIMEOUT = INITIAL_SOCKET_TIMEOUT;
+    fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
     serverThread = new Thread() {
       @Override
       public void run() {
@@ -254,7 +255,7 @@ public void run() {
           clientSocket = serverSocket.accept();
 
           // Immediately setup conditions for subsequent connections.
-          URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT;
+          fs.connectionFactory = connectionFactory;
           if (consumeConnectionBacklog) {
             consumeConnectionBacklog();
           }
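The TestWebHdfsTimeouts rewrite is the test-side counterpart of the URLConnectionFactory change: instead of mutating a process-wide static timeout, which races with any other test touching HTTP in the same JVM, the suite swaps a short-timeout factory into the one WebHdfsFileSystem instance under test and restores the default in tearDown(). The general pattern, reduced to a hypothetical, self-contained sketch (all names invented for illustration):

```java
public class InjectedTimeoutSketch {

  interface Opener { int timeoutMillis(); }

  // Production default, shared by every client that doesn't override it.
  static final Opener DEFAULT_OPENER = new Opener() {
    @Override public int timeoutMillis() { return 60 * 1000; }
  };

  // The client holds an instance reference; tests can swap it per object,
  // analogous to the package-visible fs.connectionFactory field.
  static class Client {
    Opener opener = DEFAULT_OPENER;
  }

  public static void main(String[] args) {
    Client fs = new Client();
    // setUp(): only this instance sees the 5 ms timeout...
    fs.opener = new Opener() {
      @Override public int timeoutMillis() { return 5; }
    };
    System.out.println("during test: " + fs.opener.timeoutMillis() + " ms");
    // tearDown(): ...and the default is restored for whatever runs next.
    fs.opener = DEFAULT_OPENER;
    System.out.println("after test: " + fs.opener.timeoutMillis() + " ms");
  }
}
```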
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java
new file mode 100644
index 00000000000..2ee4aa1390b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.test;
+
+import java.io.File;
+
+import org.apache.hadoop.fs.Path;
+
+public class PathUtils {
+
+  public static Path getTestPath(Class<?> caller) {
+    return getTestPath(caller, true);
+  }
+
+  public static Path getTestPath(Class<?> caller, boolean create) {
+    return new Path(getTestDirName(caller, create));
+  }
+
+  public static File getTestDir(Class<?> caller) {
+    return getTestDir(caller, true);
+  }
+
+  public static File getTestDir(Class<?> caller, boolean create) {
+    File dir = new File(System.getProperty("test.build.data", "/tmp"), caller.getSimpleName());
+    if (create) {
+      dir.mkdirs();
+    }
+    return dir;
+  }
+
+  public static String getTestDirName(Class<?> caller) {
+    return getTestDirName(caller, true);
+  }
+
+  public static String getTestDirName(Class<?> caller, boolean create) {
+    return getTestDir(caller, create).getAbsolutePath();
+  }
+
+}
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 08151d9647c..0ecd5b2045c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -712,7 +712,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
-        <version>2.12.3</version>
+        <version>2.16</version>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
@@ -864,7 +864,7 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
-          <forkMode>always</forkMode>
+          <reuseForks>false</reuseForks>
           <forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
           <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</argLine>
@@ -1039,23 +1039,5 @@
-
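Typical use of the new PathUtils helper, shown as a sketch rather than a hunk from this patch (the suite name is hypothetical): getTestDir resolves to <test.build.data>/<SimpleClassName> and creates the directory eagerly, so a static initializer is enough.

```java
import java.io.File;

import org.apache.hadoop.test.PathUtils;

public class TestSomethingParallelSafe {  // hypothetical suite
  // Resolves to <test.build.data>/TestSomethingParallelSafe, already created.
  private static final File TEST_DIR =
      PathUtils.getTestDir(TestSomethingParallelSafe.class);
  private static final File DATA_FILE = new File(TEST_DIR, "data.bin");
}
```

One caveat: keying on getSimpleName() means two identically named test classes in different packages would still share a directory; the shorter paths are a deliberate trade-off.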
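The hadoop-project pom changes retire surefire 2.12.3's forkMode in favor of the forkCount/reuseForks model introduced in surefire 2.14: forkCount bounds how many test JVMs run at once, and reuseForks=false keeps the old one-fresh-JVM-per-class behavior. Newer surefire also numbers each fork via the ${surefire.forkNumber} placeholder, which is what makes per-fork scratch directories possible. A hedged sketch of consuming that number from test code, assuming the build has exported it as a system property (surefire only substitutes the placeholder where the pom configuration references it):

```java
import java.io.File;

public class ForkScratchDirSketch {
  public static void main(String[] args) {
    // Falls back to "1" for a plain, un-forked run.
    String fork = System.getProperty("surefire.forkNumber", "1");
    File scratch = new File(
        System.getProperty("test.build.data", "build/test/data"), "fork-" + fork);
    System.out.println("per-fork scratch dir: " + scratch
        + ", created: " + scratch.mkdirs());
  }
}
```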