HDFS-4491. Parallel testing HDFS. Contributed by Andrey Klochkov.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520479 13f79535-47bb-0310-9956-ffa450edef68
parent 8aea748ec3
commit 5eb618ee1f
@@ -780,9 +780,7 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
-          <forkMode>perthread</forkMode>
-          <threadCount>${testsThreadCount}</threadCount>
-          <parallel>classes</parallel>
+          <forkCount>${testsThreadCount}</forkCount>
           <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
         </configuration>
       </plugin>
@@ -267,6 +267,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-5144. Document time unit to NameNodeMetrics. (Akira Ajisaka via
     suresh)
 
+    HDFS-4491. Parallel testing HDFS. (Andrey Klochkov via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -700,5 +700,44 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         </plugins>
       </build>
     </profile>
+    <profile>
+      <id>parallel-tests</id>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-parallel-tests-dirs</id>
+                <phase>test-compile</phase>
+                <configuration>
+                  <target>
+                    <exec executable="sh">
+                      <arg value="-c"/>
+                      <arg value="for i in {1..${testsThreadCount}}; do mkdir -p ${test.build.data}/$i; mkdir -p ${hadoop.tmp.dir}/$i; done"/>
+                    </exec>
+                  </target>
+                </configuration>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <forkCount>${testsThreadCount}</forkCount>
+              <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
+              <systemPropertyVariables>
+                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+              </systemPropertyVariables>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 </project>
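Note: with the profile above in place, the HDFS tests can be run in parallel with an invocation roughly like

    mvn test -Pparallel-tests -DtestsThreadCount=4

(the thread count is whatever suits the build host; 4 is only an example). Surefire forks ${testsThreadCount} JVMs, and the systemPropertyVariables block gives each fork its own test.build.data and hadoop.tmp.dir subdirectory keyed by ${surefire.forkNumber}; the maven-antrun-plugin step pre-creates those per-fork directories at test-compile time.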
@@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
-import org.apache.hadoop.hdfs.web.URLUtils;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
@@ -86,6 +86,8 @@ public class HftpFileSystem extends FileSystem
     HttpURLConnection.setFollowRedirects(true);
   }
 
+  URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+
   public static final Text TOKEN_KIND = new Text("HFTP delegation");
 
   protected UserGroupInformation ugi;
@@ -331,8 +333,8 @@ public class HftpFileSystem extends FileSystem
       throws IOException {
     query = addDelegationTokenParam(query);
     final URL url = getNamenodeURL(path, query);
-    final HttpURLConnection connection =
-        (HttpURLConnection)URLUtils.openConnection(url);
+    final HttpURLConnection connection;
+    connection = (HttpURLConnection)connectionFactory.openConnection(url);
     connection.setRequestMethod("GET");
     connection.connect();
     return connection;
@@ -352,12 +354,14 @@ public class HftpFileSystem extends FileSystem
   }
 
   static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener {
+    URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+
     RangeHeaderUrlOpener(final URL url) {
       super(url);
     }
 
     protected HttpURLConnection openConnection() throws IOException {
-      return (HttpURLConnection)URLUtils.openConnection(url);
+      return (HttpURLConnection)connectionFactory.openConnection(url);
     }
 
     /** Use HTTP Range header for specifying offset. */
@@ -40,7 +40,6 @@ import javax.net.ssl.X509TrustManager;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.util.Time;
 
 /**
@@ -154,7 +153,8 @@ public class HsftpFileSystem extends HftpFileSystem {
     query = addDelegationTokenParam(query);
     final URL url = new URL("https", nnUri.getHost(),
         nnUri.getPort(), path + '?' + query);
-    HttpsURLConnection conn = (HttpsURLConnection)URLUtils.openConnection(url);
+    HttpsURLConnection conn;
+    conn = (HttpsURLConnection)connectionFactory.openConnection(url);
     // bypass hostname verification
     conn.setHostnameVerifier(new DummyHostnameVerifier());
     conn.setRequestMethod("GET");
@@ -119,13 +119,15 @@ public class WebHdfsFileSystem extends FileSystem
 
   /** SPNEGO authenticator */
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
+  /** Default connection factory may be overriden in tests to use smaller timeout values */
+  URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
   /** Configures connections for AuthenticatedURL */
-  private static final ConnectionConfigurator CONN_CONFIGURATOR =
+  private final ConnectionConfigurator CONN_CONFIGURATOR =
     new ConnectionConfigurator() {
       @Override
       public HttpURLConnection configure(HttpURLConnection conn)
           throws IOException {
-        URLUtils.setTimeouts(conn);
+        connectionFactory.setTimeouts(conn);
         return conn;
       }
     };
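Note: the per-instance connectionFactory field replaces the static URLUtils helpers so that a test can swap in a factory with very small timeouts and restore the default afterwards. A rough sketch of that pattern, mirroring the TestHftpURLTimeouts changes further down in this patch (uri and conf are whatever the test already has):

    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
    fs.connectionFactory = new URLConnectionFactory(5); // tiny timeout so connect/read fail fast
    try {
      fs.openConnection("/", "");
    } finally {
      fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
      fs.close();
    }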
@@ -479,10 +481,9 @@ public class WebHdfsFileSystem extends FileSystem
           final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
           conn = new AuthenticatedURL(AUTH, CONN_CONFIGURATOR).openConnection(
             url, authToken);
-          URLUtils.setTimeouts(conn);
         } else {
           LOG.debug("open URL connection");
-          conn = (HttpURLConnection)URLUtils.openConnection(url);
+          conn = (HttpURLConnection)connectionFactory.openConnection(url);
         }
       } catch (AuthenticationException e) {
         throw new IOException(e);
@@ -577,7 +578,7 @@ public class WebHdfsFileSystem extends FileSystem
       checkRetry = false;
 
       //Step 2) Submit another Http request with the URL from the Location header with data.
-      conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
+      conn = (HttpURLConnection)connectionFactory.openConnection(new URL(redirect));
       conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
       conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
       connect();
@@ -600,7 +601,7 @@ public class WebHdfsFileSystem extends FileSystem
       disconnect();
 
       checkRetry = false;
-      conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
+      conn = (HttpURLConnection)connectionFactory.openConnection(new URL(redirect));
       connect();
     }
 
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.EnumSet;
 
@@ -27,6 +28,7 @@ import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -56,8 +58,7 @@ public class TestFiRename {
   private static String addChild = "";
   private static byte[] data = { 0 };
 
-  private static String TEST_ROOT_DIR =
-      System.getProperty("test.build.data", "/tmp") + "/test";
+  private static String TEST_ROOT_DIR = PathUtils.getTestDirName(TestFiRename.class);
 
   private static Configuration CONF = new Configuration();
   static {
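Note: the recurring test-side change in this patch is to stop deriving paths from the raw test.build.data property (or /tmp) and instead ask org.apache.hadoop.test.PathUtils for a per-test-class directory, so that concurrently forked test JVMs (each given its own test.build.data by the surefire configuration above) never share directories. The pattern, roughly, where SomeTest is a placeholder for the test class:

    // as a File
    File testDir = PathUtils.getTestDir(SomeTest.class);
    // or as a String path
    String testDirName = PathUtils.getTestDirName(SomeTest.class);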
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs;
 
+import java.io.File;
 import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -48,6 +50,7 @@ import org.junit.Test;
  * underlying file system as Hdfs.
  */
 public class TestResolveHdfsSymlink {
+  private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestResolveHdfsSymlink.class);
   private static MiniDFSCluster cluster = null;
 
   @BeforeClass
@@ -80,12 +83,12 @@ public class TestResolveHdfsSymlink {
         .getUri());
 
     Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri()
-        .toString(), "/tmp/alpha");
+        .toString(), new File(TEST_ROOT_DIR, "alpha").getAbsolutePath());
     DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16,
         (short) 1, 2);
 
     Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri()
-        .toString(), "/tmp");
+        .toString(), TEST_ROOT_DIR.getAbsolutePath());
     Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),
         "/tmp/link");
     fcHdfs.createSymlink(linkTarget, hdfsLink, true);
@@ -31,6 +31,7 @@ import java.net.URL;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 /**
@@ -38,6 +39,8 @@ import org.junit.Test;
  */
 public class TestUrlStreamHandler {
 
+  private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestUrlStreamHandler.class);
+
   /**
    * Test opening and reading from an InputStream through a hdfs:// URL.
    * <p>
@@ -111,13 +114,12 @@ public class TestUrlStreamHandler {
     Configuration conf = new HdfsConfiguration();
 
     // Locate the test temporary directory.
-    File tmpDir = new File(conf.get("hadoop.tmp.dir"));
-    if (!tmpDir.exists()) {
-      if (!tmpDir.mkdirs())
-        throw new IOException("Cannot create temporary directory: " + tmpDir);
+    if (!TEST_ROOT_DIR.exists()) {
+      if (!TEST_ROOT_DIR.mkdirs())
+        throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
     }
 
-    File tmpFile = new File(tmpDir, "thefile");
+    File tmpFile = new File(TEST_ROOT_DIR, "thefile");
     URI uri = tmpFile.toURI();
 
     FileSystem fs = FileSystem.get(uri, conf);
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -39,8 +40,7 @@ import org.junit.Test;
 public class TestLoadGenerator extends Configured implements Tool {
   private static final Configuration CONF = new HdfsConfiguration();
   private static final int DEFAULT_BLOCK_SIZE = 10;
-  private static final String OUT_DIR =
-      System.getProperty("test.build.data","build/test/data");
+  private static final File OUT_DIR = PathUtils.getTestDir(TestLoadGenerator.class);
   private static final File DIR_STRUCTURE_FILE =
       new File(OUT_DIR, StructureGenerator.DIR_STRUCTURE_FILE_NAME);
   private static final File FILE_STRUCTURE_FILE =
@@ -65,7 +65,7 @@ public class TestLoadGenerator extends Configured implements Tool {
     StructureGenerator sg = new StructureGenerator();
     String[] args = new String[]{"-maxDepth", "2", "-minWidth", "1",
         "-maxWidth", "2", "-numOfFiles", "2",
-        "-avgFileSize", "1", "-outDir", OUT_DIR, "-seed", "1"};
+        "-avgFileSize", "1", "-outDir", OUT_DIR.getAbsolutePath(), "-seed", "1"};
 
     final int MAX_DEPTH = 1;
     final int MIN_WIDTH = 3;
@@ -133,8 +133,7 @@ public class TestLoadGenerator extends Configured implements Tool {
   public void testLoadGenerator() throws Exception {
     final String TEST_SPACE_ROOT = "/test";
 
-    final String SCRIPT_TEST_DIR = new File(System.getProperty("test.build.data",
-        "/tmp")).getAbsolutePath();
+    final String SCRIPT_TEST_DIR = OUT_DIR.getAbsolutePath();
     String script = SCRIPT_TEST_DIR + "/" + "loadgenscript";
     String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2";
     File scriptFile1 = new File(script);
@@ -156,7 +155,7 @@ public class TestLoadGenerator extends Configured implements Tool {
     try {
       DataGenerator dg = new DataGenerator();
       dg.setConf(CONF);
-      String [] args = new String[] {"-inDir", OUT_DIR, "-root", TEST_SPACE_ROOT};
+      String [] args = new String[] {"-inDir", OUT_DIR.getAbsolutePath(), "-root", TEST_SPACE_ROOT};
       assertEquals(0, dg.run(args));
 
       final int READ_PROBABILITY = 1;
@@ -1407,6 +1407,13 @@ public class MiniDFSCluster {
    * Shutdown all the nodes in the cluster.
    */
   public void shutdown() {
+    shutdown(false);
+  }
+
+  /**
+   * Shutdown all the nodes in the cluster.
+   */
+  public void shutdown(boolean deleteDfsDir) {
     LOG.info("Shutting down the Mini HDFS Cluster");
     if (checkExitOnShutdown) {
       if (ExitUtil.terminateCalled()) {
@@ -1426,6 +1433,11 @@ public class MiniDFSCluster {
         nameNode = null;
       }
     }
+    if (deleteDfsDir) {
+      base_dir.delete();
+    } else {
+      base_dir.deleteOnExit();
+    }
   }
 
   /**
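Note: shutdown() keeps its old behaviour (the base directory is only registered for deletion at JVM exit), while the new shutdown(true) overload lets a test clean up eagerly. A minimal usage sketch, assuming an already-prepared conf:

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      // ... exercise the cluster ...
    } finally {
      cluster.shutdown(true); // delete the cluster's base directory now instead of at JVM exit
    }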
@@ -2118,7 +2130,7 @@ public class MiniDFSCluster {
    * <li><base directory>/data/data<2*dnIndex + 1></li>
    * <li><base directory>/data/data<2*dnIndex + 2></li>
    * </ol>
    *
    * @param dnIndex datanode index (starts from 0)
    * @param dirIndex directory index (0 or 1). Index 0 provides access to the
    *          first storage directory. Index 1 provides access to the second
@@ -2149,7 +2161,7 @@ public class MiniDFSCluster {
   public static String getDNCurrentDir(File storageDir) {
     return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/";
   }
 
   /**
    * Get directory corresponding to block pool directory in the datanode
    * @param storageDir the storage directory of a datanode.
@@ -2255,7 +2267,7 @@ public class MiniDFSCluster {
     }
     return null;
   }
 
   /**
    * Get the block metadata file for a block from a given datanode
    *
@@ -2343,14 +2355,17 @@ public class MiniDFSCluster {
     } else {
       if (checkDataNodeAddrConfig) {
         conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
-        conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-        conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
       } else {
         conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
-        conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-        conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
       }
     }
+    if (checkDataNodeAddrConfig) {
+      conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+      conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+    } else {
+      conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+      conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+    }
   }
 
   private void addToFile(String p, String address) throws IOException {
@@ -71,10 +71,6 @@ public class TestClientReportBadBlock {
 
   @Before
   public void startUpCluster() throws IOException {
-    if (System.getProperty("test.build.data") == null) { // to allow test to be
-                                                         // run outside of Ant
-      System.setProperty("test.build.data", "build/test/data");
-    }
     // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
 
@@ -20,9 +20,6 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
 import static org.junit.Assert.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -44,6 +44,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.BZip2Codec;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -61,9 +62,7 @@ public class TestDFSShell {
   private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
   private static AtomicInteger counter = new AtomicInteger();
 
-  static final String TEST_ROOT_DIR =
-      new Path(System.getProperty("test.build.data","/tmp"))
-      .toString().replace(' ', '+');
+  static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class);
 
   static Path writeFile(FileSystem fs, Path f) throws IOException {
     DataOutputStream out = fs.create(f);
@@ -482,12 +481,11 @@ public class TestDFSShell {
     Configuration dstConf = new HdfsConfiguration();
     MiniDFSCluster srcCluster = null;
     MiniDFSCluster dstCluster = null;
-    String bak = System.getProperty("test.build.data");
+    File bak = new File(PathUtils.getTestDir(getClass()), "dfs_tmp_uri");
+    bak.mkdirs();
     try{
       srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
-      File nameDir = new File(new File(bak), "dfs_tmp_uri/");
-      nameDir.mkdirs();
-      System.setProperty("test.build.data", nameDir.toString());
+      dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath());
       dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
       FileSystem srcFs = srcCluster.getFileSystem();
       FileSystem dstFs = dstCluster.getFileSystem();
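Note: rather than temporarily rewriting the global test.build.data system property (which races when tests run in parallel), the second cluster is pointed at its own directory through the MiniDFSCluster.HDFS_MINIDFS_BASEDIR configuration key. Roughly, with dstDir being whatever per-test directory the caller owns:

    Configuration dstConf = new HdfsConfiguration();
    dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dstDir.getAbsolutePath());
    MiniDFSCluster dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();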
@@ -559,7 +557,6 @@ public class TestDFSShell {
       ret = ToolRunner.run(shell, argv);
       assertEquals("default works for rm/rmr", 0, ret);
     } finally {
-      System.setProperty("test.build.data", bak);
       if (null != srcCluster) {
         srcCluster.shutdown();
       }
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -75,7 +76,7 @@ public class TestDecommission {
     // Set up the hosts/exclude files.
     localFileSys = FileSystem.getLocal(conf);
     Path workingDir = localFileSys.getWorkingDirectory();
-    Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/work-dir/decommission");
+    Path dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
     hostsFile = new Path(dir, "hosts");
     excludeFile = new Path(dir, "exclude");
 
@@ -25,6 +25,8 @@ import static org.junit.Assert.fail;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.inOrder;
 import static org.mockito.Mockito.mock;
+
+import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
@@ -73,6 +75,9 @@ public class TestDistributedFileSystem {
     HdfsConfiguration conf;
     if (noXmlDefaults) {
       conf = new HdfsConfiguration(false);
+      String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
+      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
     } else {
       conf = new HdfsConfiguration();
     }
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 /**
@@ -260,7 +261,7 @@ public class TestFSInputChecker {
     // create a file and verify that checksum corruption results in
     // a checksum exception on LocalFS
 
-    String dir = System.getProperty("test.build.data", ".");
+    String dir = PathUtils.getTestDirName(getClass());
     Path file = new Path(dir + "/corruption-test.dat");
     Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
 
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 /**
@@ -145,7 +146,7 @@ public class TestFileAppendRestart {
 
     String tarFile = System.getProperty("test.cache.data", "build/test/cache")
       + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
-    String testDir = System.getProperty("test.build.data", "build/test/data");
+    String testDir = PathUtils.getTestDirName(getClass());
     File dfsDir = new File(testDir, "image-with-buggy-append");
     if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
       throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
@@ -95,7 +96,7 @@ public class TestFileCorruption {
   @Test
   public void testLocalFileCorruption() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
+    Path file = new Path(PathUtils.getTestDirName(getClass()), "corruptFile");
     FileSystem fs = FileSystem.getLocal(conf);
     DataOutputStream dos = fs.create(file);
     dos.writeBytes("original bytes");
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.namenode.BackupNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 /**
@@ -53,6 +54,9 @@ public class TestHDFSServerPorts {
 
   // reset default 0.0.0.0 addresses in order to avoid IPv6 problem
   static final String THIS_HOST = getFullHostName() + ":0";
+
+  private static final File TEST_DATA_DIR = PathUtils.getTestDir(TestHDFSServerPorts.class);
+
   static {
     DefaultMetricsSystem.setMiniClusterMode(true);
   }
@@ -81,13 +85,6 @@ public class TestHDFSServerPorts {
     }
   }
 
-  /**
-   * Get base directory these tests should run in.
-   */
-  private String getTestingDir() {
-    return System.getProperty("test.build.data", "build/test/data");
-  }
-
   public NameNode startNameNode() throws IOException {
     return startNameNode(false);
   }
@@ -95,8 +92,7 @@ public class TestHDFSServerPorts {
    * Start the namenode.
    */
   public NameNode startNameNode(boolean withService) throws IOException {
-    String dataDir = getTestingDir();
-    hdfsDir = new File(dataDir, "dfs");
+    hdfsDir = new File(TEST_DATA_DIR, "dfs");
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
       throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
     }
@@ -119,9 +115,8 @@ public class TestHDFSServerPorts {
    * Start the BackupNode
    */
   public BackupNode startBackupNode(Configuration conf) throws IOException {
-    String dataDir = getTestingDir();
     // Set up testing environment directories
-    hdfsDir = new File(dataDir, "backupNode");
+    hdfsDir = new File(TEST_DATA_DIR, "backupNode");
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
       throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
     }
@@ -150,8 +145,7 @@ public class TestHDFSServerPorts {
    */
   public DataNode startDataNode(int index, Configuration config)
   throws IOException {
-    String dataDir = getTestingDir();
-    File dataNodeDir = new File(dataDir, "data-" + index);
+    File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index);
     config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());
 
     String[] args = new String[] {};
@@ -33,16 +33,11 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.web.URLUtils;
-import org.junit.BeforeClass;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.junit.Test;
 
 public class TestHftpURLTimeouts {
-  @BeforeClass
-  public static void setup() {
-    URLUtils.SOCKET_TIMEOUT = 5;
-  }
 
   @Test
   public void testHftpSocketTimeout() throws Exception {
     Configuration conf = new Configuration();
@@ -51,9 +46,11 @@ public class TestHftpURLTimeouts {
         InetAddress.getByName(null).getHostAddress(),
         socket.getLocalPort(),
         null, null, null);
-    boolean timedout = false;
 
     HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf);
+    fs.connectionFactory = new URLConnectionFactory(5);
+
+    boolean timedout = false;
     try {
       HttpURLConnection conn = fs.openConnection("/", "");
       timedout = false;
@@ -69,6 +66,7 @@ public class TestHftpURLTimeouts {
       assertTrue("read timedout", timedout);
       assertTrue("connect timedout", checkConnectTimeout(fs, false));
     } finally {
+      fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
      fs.close();
     }
   }
@@ -84,6 +82,8 @@ public class TestHftpURLTimeouts {
     boolean timedout = false;
 
     HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf);
+    fs.connectionFactory = new URLConnectionFactory(5);
+
     try {
       HttpURLConnection conn = null;
       timedout = false;
@@ -100,6 +100,7 @@ public class TestHftpURLTimeouts {
       assertTrue("ssl read connect timedout", timedout);
       assertTrue("connect timedout", checkConnectTimeout(fs, true));
     } finally {
+      fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
      fs.close();
     }
   }
}
|
||||||
|
@ -121,7 +122,7 @@ public class TestHftpURLTimeouts {
|
||||||
// https will get a read timeout due to SSL negotiation, but
|
// https will get a read timeout due to SSL negotiation, but
|
||||||
// a normal http will not, so need to ignore SSL read timeouts
|
// a normal http will not, so need to ignore SSL read timeouts
|
||||||
// until a connect timeout occurs
|
// until a connect timeout occurs
|
||||||
if (!(ignoreReadTimeout && message.equals("Read timed out"))) {
|
if (!(ignoreReadTimeout && "Read timed out".equals(message))) {
|
||||||
timedout = true;
|
timedout = true;
|
||||||
assertEquals("connect timed out", message);
|
assertEquals("connect timed out", message);
|
||||||
}
|
}
|
||||||
|
@@ -25,8 +25,8 @@ import static org.junit.Assume.assumeTrue;
 import java.io.File;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.junit.After;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -43,20 +43,10 @@ public class TestMiniDFSCluster {
   private static final String CLUSTER_3 = "cluster3";
   private static final String CLUSTER_4 = "cluster4";
   private static final String CLUSTER_5 = "cluster5";
-  protected String testDataPath;
-  protected File testDataDir;
+  protected File testDataPath;
   @Before
   public void setUp() {
-    testDataPath = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA,
-        "build/test/data");
-    testDataDir = new File(new File(testDataPath).getParentFile(),
-        "miniclusters");
-
-
-  }
-  @After
-  public void tearDown() {
-    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testDataPath);
+    testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
   }
 
   /**
@@ -120,7 +110,7 @@ public class TestMiniDFSCluster {
     MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
     try {
       DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem();
-      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
+      dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
       cluster4.shutdown();
     } finally {
       while(cluster4.isClusterUp()){
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
@@ -322,7 +323,7 @@ public class TestPersistBlocks {
 
     String tarFile = System.getProperty("test.cache.data", "build/test/cache")
       + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
-    String testDir = System.getProperty("test.build.data", "build/test/data");
+    String testDir = PathUtils.getTestDirName(getClass());
     File dfsDir = new File(testDir, "image-1.0");
     if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
       throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
@@ -174,6 +174,7 @@ public class TestNNWithQJM {
   public void testMismatchedNNIsRejected() throws Exception {
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
+    String defaultEditsDir = conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
         mjc.getQuorumJournalURI("myjournal").toString());
 
@@ -187,7 +188,7 @@ public class TestNNWithQJM {
 
     // Reformat just the on-disk portion
     Configuration onDiskOnly = new Configuration(conf);
-    onDiskOnly.unset(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
+    onDiskOnly.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, defaultEditsDir);
     NameNode.format(onDiskOnly);
 
     // Start the NN - should fail because the JNs are still formatted
@@ -46,6 +46,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Shell;
 import org.junit.After;
 import org.junit.Before;
@@ -61,13 +62,13 @@ public class TestJournalNode {
   private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
       12345, "mycluster", "my-bp", 0L);
 
+  private static File TEST_BUILD_DATA = PathUtils.getTestDir(TestJournalNode.class);
+
   private JournalNode jn;
   private Journal journal;
   private Configuration conf = new Configuration();
   private IPCLoggerChannel ch;
   private String journalId;
-  private File TEST_BUILD_DATA =
-      new File(System.getProperty("test.build.data", "build/test/data"));
 
   static {
     // Avoid an error when we double-initialize JvmMetrics
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
-import java.io.IOException;
 import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -92,8 +93,7 @@ public class TestReplicationPolicy {
 
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-    File baseDir = new File(System.getProperty(
-        "test.build.data", "build/test/data"), "dfs/");
+    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         new File(baseDir, "name").getPath());
 
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -41,6 +40,9 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 public class TestReplicationPolicyWithNodeGroup {
@@ -48,10 +50,10 @@ public class TestReplicationPolicyWithNodeGroup {
   private static final int NUM_OF_DATANODES = 8;
   private static final int NUM_OF_DATANODES_BOUNDARY = 6;
   private static final int NUM_OF_DATANODES_MORE_TARGETS = 12;
-  private static final Configuration CONF = new HdfsConfiguration();
-  private static final NetworkTopology cluster;
-  private static final NameNode namenode;
-  private static final BlockPlacementPolicy replicator;
+  private final Configuration CONF = new HdfsConfiguration();
+  private NetworkTopology cluster;
+  private NameNode namenode;
+  private BlockPlacementPolicy replicator;
   private static final String filename = "/dummyfile.txt";
 
   private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
@@ -94,27 +96,23 @@ public class TestReplicationPolicyWithNodeGroup {
   private final static DatanodeDescriptor NODE =
       new DatanodeDescriptor(DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d2/r4/n7"));
 
-  static {
-    try {
+  @Before
+  public void setUp() throws Exception {
       FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
       CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
       // Set properties to make HDFS aware of NodeGroup.
       CONF.set("dfs.block.replicator.classname",
          "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup");
       CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
          "org.apache.hadoop.net.NetworkTopologyWithNodeGroup");
 
-      File baseDir = new File(System.getProperty(
-          "test.build.data", "build/test/data"), "dfs/");
+      File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
       CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
           new File(baseDir, "name").getPath());
 
       DFSTestUtil.formatNameNode(CONF);
       namenode = new NameNode(CONF);
-    } catch (IOException e) {
-      e.printStackTrace();
-      throw (RuntimeException)new RuntimeException().initCause(e);
-    }
     final BlockManager bm = namenode.getNamesystem().getBlockManager();
     replicator = bm.getBlockPlacementPolicy();
     cluster = bm.getDatanodeManager().getNetworkTopology();
||||||
|
@ -125,6 +123,11 @@ public class TestReplicationPolicyWithNodeGroup {
|
||||||
setupDataNodeCapacity();
|
setupDataNodeCapacity();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@After
|
||||||
|
public void tearDown() throws Exception {
|
||||||
|
namenode.stop();
|
||||||
|
}
|
||||||
|
|
||||||
private static void setupDataNodeCapacity() {
|
private static void setupDataNodeCapacity() {
|
||||||
for(int i=0; i<NUM_OF_DATANODES; i++) {
|
for(int i=0; i<NUM_OF_DATANODES; i++) {
|
||||||
dataNodes[i].updateHeartbeat(
|
dataNodes[i].updateHeartbeat(
|
||||||
|
@ -636,7 +639,9 @@ public class TestReplicationPolicyWithNodeGroup {
|
||||||
*/
|
*/
|
||||||
@Test
|
@Test
|
||||||
public void testChooseMoreTargetsThanNodeGroups() throws Exception {
|
public void testChooseMoreTargetsThanNodeGroups() throws Exception {
|
||||||
// Cleanup nodes in previous tests
|
for(int i=0; i<NUM_OF_DATANODES; i++) {
|
||||||
|
cluster.remove(dataNodes[i]);
|
||||||
|
}
|
||||||
for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
|
for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
|
||||||
DatanodeDescriptor node = dataNodesInBoundaryCase[i];
|
DatanodeDescriptor node = dataNodesInBoundaryCase[i];
|
||||||
if (cluster.contains(node)) {
|
if (cluster.contains(node)) {
|
||||||
|
|
|
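The pattern in the hunks above recurs throughout this change: a static initializer that built one NameNode shared by the whole class, and never tore it down, becomes a @Before/@After pair, so every test builds a fresh fixture and releases it afterwards. Below is a minimal, hypothetical JUnit 4 sketch of that lifecycle; the class and field names are made up, and only the annotations and the build/stop symmetry mirror the diff.

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class PerTestFixtureSketch {
      // Stand-in for the NameNode built in setUp() above.
      private StringBuilder resource;

      @Before
      public void setUp() throws Exception {
        resource = new StringBuilder("fresh");   // created anew for every test method
      }

      @After
      public void tearDown() throws Exception {
        resource = null;                         // released after every test, like namenode.stop()
      }

      @Test
      public void usesItsOwnResource() {
        resource.append(" state");
      }
    }
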
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
@@ -70,8 +71,7 @@ public class TestBPOfferService {
       TestBPOfferService.class);
   private static final ExtendedBlock FAKE_BLOCK =
       new ExtendedBlock(FAKE_BPID, 12345L);
-  private static final String TEST_BUILD_DATA = System.getProperty(
-      "test.build.data", "build/test/data");
+  private static final File TEST_BUILD_DATA = PathUtils.getTestDir(TestBPOfferService.class);

   static {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);

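Almost every file in this change swaps the shared System.getProperty("test.build.data", ...) location for a directory derived from the test class via PathUtils, so two classes running in parallel forks never write into the same path. A small sketch of the two helpers as they are used above; the class name here is hypothetical, and it assumes PathUtils from the hadoop-common test jar is on the classpath.

    import java.io.File;

    import org.apache.hadoop.test.PathUtils;

    public class TestDataDirSketch {
      // Each helper keys the directory off the calling class, so the value is
      // unique per test class rather than shared across the whole module.
      private static final File TEST_DIR = PathUtils.getTestDir(TestDataDirSketch.class);
      private static final String TEST_DIR_NAME = PathUtils.getTestDirName(TestDataDirSketch.class);
    }
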
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -54,37 +55,34 @@ public class TestAllowFormat {
   public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
   private static final Log LOG =
       LogFactory.getLog(TestAllowFormat.class.getName());
+  private static final File DFS_BASE_DIR = new File(PathUtils.getTestDir(TestAllowFormat.class), "dfs");
   private static Configuration config;
   private static MiniDFSCluster cluster = null;
-  private static File hdfsDir=null;

   @BeforeClass
   public static void setUp() throws Exception {
     config = new Configuration();
-    String baseDir = System.getProperty("test.build.data", "build/test/data");
-
-    hdfsDir = new File(baseDir, "dfs");
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not delete hdfs directory '" + hdfsDir +
+    if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
+      throw new IOException("Could not delete hdfs directory '" + DFS_BASE_DIR +
                             "'");
     }

     // Test has multiple name directories.
     // Format should not really prompt us if one of the directories exist,
     // but is empty. So in case the test hangs on an input, it means something
     // could be wrong in the format prompting code. (HDFS-1636)
-    LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
-    File nameDir1 = new File(hdfsDir, "name1");
-    File nameDir2 = new File(hdfsDir, "name2");
+    LOG.info("hdfsdir is " + DFS_BASE_DIR.getAbsolutePath());
+    File nameDir1 = new File(DFS_BASE_DIR, "name1");
+    File nameDir2 = new File(DFS_BASE_DIR, "name2");

     // To test multiple directory handling, we pre-create one of the name directories.
     nameDir1.mkdirs();

     // Set multiple name directories.
     config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
-    config.set(DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
+    config.set(DFS_DATANODE_DATA_DIR_KEY, new File(DFS_BASE_DIR, "data").getPath());

-    config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
+    config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(DFS_BASE_DIR, "secondary").getPath());

     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
   }
@@ -99,9 +97,9 @@ public class TestAllowFormat {
       LOG.info("Stopping mini cluster");
     }

-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+    if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
       throw new IOException("Could not delete hdfs directory in tearDown '"
-          + hdfsDir + "'");
+          + DFS_BASE_DIR + "'");
     }
   }

@@ -170,7 +168,7 @@ public class TestAllowFormat {
     HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

     conf.set(DFS_NAMENODE_NAME_DIR_KEY,
-        new File(hdfsDir, "name").getAbsolutePath());
+        new File(DFS_BASE_DIR, "name").getAbsolutePath());
     conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
     conf.set(DFSUtil.addKeySuffixes(
         DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),

@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -58,8 +59,7 @@ import org.junit.Test;
  * A JUnit test that audit logs are generated
  */
 public class TestAuditLogs {
-  static final String auditLogFile = System.getProperty("test.build.dir",
-      "build/test") + "/audit.log";
+  static final String auditLogFile = PathUtils.getTestDirName(TestAuditLogs.class) + "/TestAuditLogs-audit.log";

   // Pattern for:
   // allowed=(true|false) ugi=name ip=/address cmd={cmd} src={path} dst=null perm=null

@@ -100,6 +100,8 @@ public class TestBackupNode {
         "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
     c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
         "127.0.0.1:0");
+    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+        "127.0.0.1:0");

     BackupNode bn = (BackupNode)NameNode.createNameNode(
         new String[]{startupOpt.getName()}, c);

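TestBackupNode now also binds the backup node's HTTP server to port 0, letting the OS hand out a free port instead of a fixed one that a second fork might already hold. A sketch of that configuration step, using the same keys as the hunk above; the wrapper method itself is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class EphemeralPortSketch {
      // Port 0 means "any free port", so concurrently forked test JVMs cannot
      // collide on the backup node's RPC or HTTP address.
      static Configuration withEphemeralBackupPorts(Configuration c) {
        c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "127.0.0.1:0");
        c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "127.0.0.1:0");
        return c;
      }
    }
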
@@ -75,6 +75,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.StringUtils;
@@ -193,7 +194,7 @@ public class TestCheckpoint {
     ArrayList<URI> fsImageDirs = new ArrayList<URI>();
     ArrayList<URI> editsDirs = new ArrayList<URI>();
     File filePath =
-        new File(System.getProperty("test.build.data","/tmp"), "storageDirToCheck");
+        new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
     assertTrue("Couldn't create directory storageDirToCheck",
         filePath.exists() || filePath.mkdirs());
     fsImageDirs.add(filePath.toURI());
@@ -1911,9 +1912,11 @@
     }

     // Start a new NN with the same host/port.
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-        .nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true)
-        .build();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(0)
+        .nameNodePort(origPort)
+        .nameNodeHttpPort(origHttpPort)
+        .format(true).build();

     try {
       secondary.doCheckpoint();
@@ -2135,7 +2138,8 @@
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);

     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0)
           .format(true).build();
       FileSystem fs = cluster.getFileSystem();
       secondary = startSecondaryNameNode(conf);

@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.junit.After;
@@ -72,7 +73,7 @@ public class TestClusterId {
   public void setUp() throws IOException {
     ExitUtil.disableSystemExit();

-    String baseDir = System.getProperty("test.build.data", "build/test/data");
+    String baseDir = PathUtils.getTestDirName(getClass());

     hdfsDir = new File(baseDir, "dfs/name");
     if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {

@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -96,9 +97,8 @@ public class TestEditLog {
   static final int NUM_TRANSACTIONS = 100;
   static final int NUM_THREADS = 100;

-  static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  static final File TEST_DIR = PathUtils.getTestDir(TestEditLog.class);

   /** An edits log with 3 edits from 0.20 - the result of
    * a fresh namesystem followed by hadoop fs -touchz /myfile */
   static final byte[] HADOOP20_SOME_EDITS =
@@ -569,6 +569,7 @@ public class TestEditLog {
       fail("should not be able to start");
     } catch (IOException e) {
       // expected
+      assertNotNull("Cause of exception should be ChecksumException", e.getCause());
       assertEquals("Cause of exception should be ChecksumException",
           ChecksumException.class, e.getCause().getClass());
     }

@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.IOException;

+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
@@ -34,8 +35,7 @@ import org.junit.Test;
  * Test the EditLogFileOutputStream
  */
 public class TestEditLogFileOutputStream {
-  private final static File TEST_DIR =
-      new File(System.getProperty("test.build.data", "/tmp"));
+  private final static File TEST_DIR = PathUtils.getTestDir(TestEditLogFileOutputStream.class);
   private static final File TEST_EDITS =
       new File(TEST_DIR, "testEditLogFileOutput.log");
   final static int MIN_PREALLOCATION_LENGTH =

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.junit.After;
 import org.junit.Before;
@@ -192,8 +193,7 @@ public class TestEditLogJournalFailures {
     Configuration conf = new HdfsConfiguration();
     String[] nameDirs = new String[4];
     for (int i = 0; i < nameDirs.length; i++) {
-      File nameDir = new File(System.getProperty("test.build.data"),
-          "name-dir" + i);
+      File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i);
       nameDir.mkdirs();
       nameDirs[i] = nameDir.getAbsolutePath();
     }

@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;

@@ -60,8 +61,7 @@ public class TestFSEditLogLoader {
     ((Log4JLogger)FSEditLogLoader.LOG).getLogger().setLevel(Level.ALL);
   }

-  private static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = PathUtils.getTestDir(TestFSEditLogLoader.class);

   private static final int NUM_DATA_NODES = 0;

@@ -91,7 +91,7 @@ import static org.mockito.Mockito.*;
  */
 public class TestFsck {
   static final String auditLogFile = System.getProperty("test.build.dir",
-      "build/test") + "/audit.log";
+      "build/test") + "/TestFsck-audit.log";

   // Pattern for:
   // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
@@ -159,7 +159,8 @@ public class TestFsck {
       cluster.shutdown();

       // restart the cluster; bring up namenode but not the data nodes
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0).format(false).build();
       outStr = runFsck(conf, 1, true, "/");
       // expect the result is corrupt
       assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));

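The audit-log tests stop sharing a single audit.log: each class now writes a file that embeds its own name. A sketch of that naming convention follows; the helper below is hypothetical, and only PathUtils.getTestDirName and the "<ClassName>-audit.log" suffix come from the hunks above.

    import org.apache.hadoop.test.PathUtils;

    public class AuditLogPathSketch {
      // Builds a per-class audit log path so two audit-logging tests never append
      // to the same file.  TestAuditLogs above does essentially this; TestFsck keeps
      // its old test.build.dir base and only renames the file.
      static String auditLogFileFor(Class<?> testClass) {
        return PathUtils.getTestDirName(testClass) + "/" + testClass.getSimpleName() + "-audit.log";
      }
    }
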
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;

@@ -60,7 +61,7 @@ public class TestNameEditsConfigs {

   short replication = 3;
   private File base_dir = new File(
-      System.getProperty("test.build.data", "build/test/data"), "dfs/");
+      PathUtils.getTestDir(TestNameEditsConfigs.class), "dfs");

   @Before
   public void setUp() throws IOException {
@@ -68,7 +69,7 @@ public class TestNameEditsConfigs {
       throw new IOException("Cannot remove directory " + base_dir);
     }
   }

   void checkImageAndEditsFilesExistence(File dir,
       boolean shouldHaveImages,
       boolean shouldHaveEdits)

@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;

@@ -57,8 +58,7 @@ import com.google.common.collect.Sets;
 public class TestNameNodeRecovery {
   private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
   private static StartupOption recoverStartOpt = StartupOption.RECOVER;
-  private static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = PathUtils.getTestDir(TestNameNodeRecovery.class);

   static {
     recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);

@@ -33,12 +33,14 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;

 public class TestNameNodeResourceChecker {
+  private final static File BASE_DIR = PathUtils.getTestDir(TestNameNodeResourceChecker.class);
   private Configuration conf;
   private File baseDir;
   private File nameDir;
@@ -46,8 +48,7 @@ public class TestNameNodeResourceChecker {
   @Before
   public void setUp () throws IOException {
     conf = new Configuration();
-    baseDir = new File(System.getProperty("test.build.data"));
-    nameDir = new File(baseDir, "resource-check-name-dir");
+    nameDir = new File(BASE_DIR, "resource-check-name-dir");
     nameDir.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
   }
@@ -141,8 +142,8 @@ public class TestNameNodeResourceChecker {
   @Test
   public void testChecking2NameDirsOnOneVolume() throws IOException {
     Configuration conf = new Configuration();
-    File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
-    File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+    File nameDir1 = new File(BASE_DIR, "name-dir1");
+    File nameDir2 = new File(BASE_DIR, "name-dir2");
     nameDir1.mkdirs();
     nameDir2.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
@@ -162,7 +163,7 @@ public class TestNameNodeResourceChecker {
   @Test
   public void testCheckingExtraVolumes() throws IOException {
     Configuration conf = new Configuration();
-    File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
+    File nameDir = new File(BASE_DIR, "name-dir");
     nameDir.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
@@ -182,8 +183,8 @@ public class TestNameNodeResourceChecker {
   @Test
   public void testLowResourceVolumePolicy() throws IOException, URISyntaxException {
     Configuration conf = new Configuration();
-    File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
-    File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+    File nameDir1 = new File(BASE_DIR, "name-dir1");
+    File nameDir2 = new File(BASE_DIR, "name-dir2");
     nameDir1.mkdirs();
     nameDir2.mkdirs();

@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
 import org.junit.After;
@@ -404,8 +405,7 @@ public class TestStartup {
     Configuration conf = new Configuration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-    File base_dir = new File(System.getProperty(
-        "test.build.data", "build/test/data"), "dfs/");
+    File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         new File(base_dir, "name").getPath());
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);

@@ -391,7 +391,8 @@ public class TestStorageRestore {
         (new File(path3, "current").getAbsolutePath()) : path3.toString();

     try {
-      cluster = new MiniDFSCluster.Builder(config).numDataNodes(0)
+      cluster = new MiniDFSCluster.Builder(config)
+          .numDataNodes(0)
           .manageNameDfsDirs(false).build();
       cluster.waitActive();

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -48,8 +49,7 @@ import com.google.common.collect.ImmutableList;

 public class TestTransferFsImage {

-  private static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = PathUtils.getTestDir(TestTransferFsImage.class);

   /**
    * Regression test for HDFS-1997. Test that, if an exception

@@ -56,8 +56,8 @@ public class TestBootstrapStandby {

     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));

     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)

@@ -70,13 +70,13 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);

-    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10003);
-    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10004);
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10023);
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10024);

     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
         .numDataNodes(0)

@@ -124,8 +124,8 @@ public class TestEditLogTailer {
     // Have to specify IPC ports so the NNs can talk to each other.
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));

     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)

@@ -76,8 +76,8 @@ public class TestFailureToReadEdits {

     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10041))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10042)));
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
         .numDataNodes(0)

@@ -52,8 +52,8 @@ public class TestHAFsck {
     // need some HTTP ports
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ha-nn-uri-0")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10051))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10052)));

     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)

@@ -89,8 +89,8 @@ public class TestStandbyCheckpoints {

     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));

     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)

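The HA test hunks above all make the same move: instead of every class asking for HTTP or IPC ports 10001 and 10002, each class claims its own small range (20001/20002, 10021-10024, 10031/10032, 10041/10042, 10051/10052, 10061/10062), so forked test JVMs running different classes cannot race for the same socket. Below is a sketch of the shared set-up with made-up port numbers; it is built only from calls that appear in the hunks above, and the import paths assume the usual HDFS test-source packages.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.MiniDFSNNTopology;

    public class UniquePortTopologySketch {
      // The 10071/10072 pair is illustrative; the point is simply that no other
      // test class in the module uses the same numbers.
      static MiniDFSCluster startTwoNNCluster(Configuration conf) throws IOException {
        MiniDFSNNTopology topology = new MiniDFSNNTopology()
            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
                .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10071))
                .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10072)));
        return new MiniDFSCluster.Builder(conf)
            .nnTopology(topology)
            .numDataNodes(0)
            .build();
      }
    }
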
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;

@@ -53,7 +54,7 @@ public class TestOfflineEditsViewer {
   }

   private static String buildDir =
-      System.getProperty("test.build.data", "build/test/data");
+      PathUtils.getTestDirName(TestOfflineEditsViewer.class);

   private static String cacheDir =
       System.getProperty("test.cache.data", "build/test/cache");

@@ -27,6 +27,7 @@ import java.io.FileReader;
 import java.io.IOException;

 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;

 /**
@@ -34,7 +35,7 @@ import org.junit.Test;
  * on predetermined inputs
  */
 public class TestDelimitedImageVisitor {
-  private static String ROOT = System.getProperty("test.build.data","/tmp");
+  private static String ROOT = PathUtils.getTestDirName(TestDelimitedImageVisitor.class);
   private static final String delim = "--";

   // Record an element in the visitor and build the expected line in the output

@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -88,8 +89,7 @@ public class TestOfflineImageViewer {
   final static HashMap<String, FileStatus> writtenFiles =
       new HashMap<String, FileStatus>();

-  private static String ROOT = System.getProperty("test.build.data",
-      "build/test/data");
+  private static String ROOT = PathUtils.getTestDirName(TestOfflineImageViewer.class);

   // Create a populated namespace for later testing. Save its contents to a
   // data structure and store its fsimage location.

@@ -30,6 +30,7 @@ import java.io.OutputStream;

 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;

@@ -40,10 +41,7 @@ public class TestAtomicFileOutputStream {
   private static final String TEST_STRING = "hello world";
   private static final String TEST_STRING_2 = "goodbye world";

-  private static File BASE_DIR = new File(
-      System.getProperty("test.build.data", "build/test/data"));
-  private static File TEST_DIR = new File(BASE_DIR,
-      TestAtomicFileOutputStream.class.getName());
+  private static File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);

   private static File DST_FILE = new File(TEST_DIR, "test.txt");

@@ -29,14 +29,12 @@ import java.io.IOException;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;

 public class TestMD5FileUtils {
-  private static final File TEST_DIR_ROOT = new File(
-      System.getProperty("test.build.data","build/test/data"));
-  private static final File TEST_DIR = new File(TEST_DIR_ROOT,
-      "TestMD5FileUtils");
+  private static final File TEST_DIR = PathUtils.getTestDir(TestMD5FileUtils.class);
   private static final File TEST_FILE = new File(TEST_DIR,
       "testMd5File.dat");

@@ -58,7 +58,6 @@ public class TestWebHdfsTimeouts {

   private static final int CLIENTS_TO_CONSUME_BACKLOG = 100;
   private static final int CONNECTION_BACKLOG = 1;
-  private static final int INITIAL_SOCKET_TIMEOUT = URLUtils.SOCKET_TIMEOUT;
   private static final int SHORT_SOCKET_TIMEOUT = 5;
   private static final int TEST_TIMEOUT = 10000;

@@ -67,20 +66,22 @@ public class TestWebHdfsTimeouts {
   private InetSocketAddress nnHttpAddress;
   private ServerSocket serverSocket;
   private Thread serverThread;
+  private URLConnectionFactory connectionFactory = new URLConnectionFactory(SHORT_SOCKET_TIMEOUT);

   @Before
   public void setUp() throws Exception {
-    URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT;
     Configuration conf = WebHdfsTestUtil.createConf();
     nnHttpAddress = NameNode.getHttpAddress(conf);
     serverSocket = new ServerSocket(nnHttpAddress.getPort(), CONNECTION_BACKLOG);
     fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    fs.connectionFactory = connectionFactory;
     clients = new ArrayList<SocketChannel>();
     serverThread = null;
   }

   @After
   public void tearDown() throws Exception {
+    fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
     IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
     IOUtils.cleanup(LOG, fs);
     if (serverSocket != null) {
@@ -240,7 +241,7 @@ public class TestWebHdfsTimeouts {
    */
   private void startSingleTemporaryRedirectResponseThread(
       final boolean consumeConnectionBacklog) {
-    URLUtils.SOCKET_TIMEOUT = INITIAL_SOCKET_TIMEOUT;
+    fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
     serverThread = new Thread() {
       @Override
       public void run() {
@@ -254,7 +255,7 @@ public class TestWebHdfsTimeouts {
         clientSocket = serverSocket.accept();

         // Immediately setup conditions for subsequent connections.
-        URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT;
+        fs.connectionFactory = connectionFactory;
         if (consumeConnectionBacklog) {
           consumeConnectionBacklog();
         }

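TestWebHdfsTimeouts used to flip the global URLUtils.SOCKET_TIMEOUT back and forth, which another test running in the same JVM could observe mid-flight. The hunks above replace that with a URLConnectionFactory held by the test and swapped on the filesystem instance itself. A compressed, hypothetical sketch of that pattern follows; it assumes, as the test does, that the connectionFactory field and URLConnectionFactory are reachable from the org.apache.hadoop.hdfs.web package.

    package org.apache.hadoop.hdfs.web;

    public class TimeoutSwapSketch {
      // 5 is the SHORT_SOCKET_TIMEOUT value used by the test above.
      private final URLConnectionFactory shortTimeoutFactory = new URLConnectionFactory(5);

      void useShortTimeout(WebHdfsFileSystem fs) {
        fs.connectionFactory = shortTimeoutFactory;          // per-instance, not a global
      }

      void restoreDefault(WebHdfsFileSystem fs) {
        fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
      }
    }
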
@@ -713,7 +713,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
-        <version>2.12.3</version>
+        <version>2.16</version>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
@@ -827,7 +827,7 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
-          <forkMode>always</forkMode>
+          <reuseForks>false</reuseForks>
           <forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
           <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</argLine>
           <environmentVariables>
@@ -1002,23 +1002,5 @@
       </plugins>
     </build>
   </profile>
-  <!-- Copied into specific modules supporting parallel testing. Will be uncommented as soon as all modules support this.
-  <profile>
-    <id>parallel-tests</id>
-    <build>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-surefire-plugin</artifactId>
-          <configuration>
-            <forkMode>perthread</forkMode>
-            <threadCount>${testsThreadCount}</threadCount>
-            <parallel>classes</parallel>
-          </configuration>
-        </plugin>
-      </plugins>
-    </build>
-  </profile>
-  -->
 </profiles>
</project>