HDFS-9139. Enable parallel JUnit tests for HDFS Pre-commit (Contributed by Chris Nauroth and Vinayakumar B)

(cherry picked from commit 39581e3be2)

Conflicts:
    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java

Parent: b01eaa3b46
Commit: b0b97dfbef
@@ -2255,16 +2255,8 @@ function check_unittests

      test_logfile=${PATCH_DIR}/testrun_${module_suffix}.txt
      echo " Running tests in ${module_suffix}"
-      # Temporary hack to run the parallel tests profile only for hadoop-common.
-      # This code will be removed once hadoop-hdfs is ready for parallel test
-      # execution.
-      if [[ ${module} == "hadoop-common-project/hadoop-common" ]] ; then
-        OPTIONAL_PARALLEL_TESTS_PROFILE=${PARALLEL_TESTS_PROFILE}
-      else
-        unset OPTIONAL_PARALLEL_TESTS_PROFILE
-      fi
      # shellcheck disable=2086
-      echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} ${OPTIONAL_PARALLEL_TESTS_PROFILE} ${TESTS_THREAD_COUNT} -D${PROJECT_NAME}PatchProcess
+      echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} ${PARALLEL_TESTS_PROFILE} ${TESTS_THREAD_COUNT} -D${PROJECT_NAME}PatchProcess
      test_build_result=$?

      add_jira_footer "${module_suffix} test log" "@@BASE@@/testrun_${module_suffix}.txt"
@@ -714,6 +714,9 @@ Release 2.8.0 - UNRELEASED
     DataTransferProtocol sockets and allow configuring auto-tuning (He Tianyi
     via Colin P. McCabe)

+    HDFS-9139. Enable parallel JUnit tests for HDFS Pre-commit
+    (Chris Nauroth and Vinayakumar B via vinayakumarb)
+
   BUG FIXES

     HDFS-8091: ACLStatus and XAttributes should be presented to
@@ -424,14 +424,23 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>maven-antrun-plugin</artifactId>
         <executions>
           <execution>
             <id>create-parallel-tests-dirs</id>
             <phase>test-compile</phase>
             <configuration>
               <target>
-                <exec executable="${shell-executable}">
-                  <arg value="-c"/>
-                  <arg value="for i in {1..${testsThreadCount}}; do mkdir -p ${test.build.data}/$i; mkdir -p ${hadoop.tmp.dir}/$i; done"/>
-                </exec>
+                <script language="javascript"><![CDATA[
+                  var baseDirs = [
+                      "${test.build.data}",
+                      "${test.build.dir}",
+                      "${hadoop.tmp.dir}" ];
+                  for (var i in baseDirs) {
+                    for (var j = 1; j <= ${testsThreadCount}; ++j) {
+                      var mkdir = project.createTask("mkdir");
+                      mkdir.setDir(new java.io.File(baseDirs[i], j));
+                      mkdir.perform();
+                    }
+                  }
+                ]]></script>
               </target>
             </configuration>
             <goals>
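For orientation, a rough Java equivalent of the Ant <script> block added above, shown only to make the build step's effect concrete; the base directories and thread count are placeholder stand-ins for the Maven properties ${test.build.data}, ${test.build.dir}, ${hadoop.tmp.dir} and ${testsThreadCount}:

import java.io.File;

public class CreateParallelTestDirsSketch {
  public static void main(String[] args) {
    // Stand-ins for the Maven property values; the real build resolves them.
    String[] baseDirs = { "target/test/data", "target/test-dir", "target/tmp" };
    int testsThreadCount = 4; // stand-in for ${testsThreadCount}
    for (String base : baseDirs) {
      // One numbered scratch directory per forked JVM under each base dir.
      for (int fork = 1; fork <= testsThreadCount; fork++) {
        new File(base, String.valueOf(fork)).mkdirs();
      }
    }
  }
}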
@@ -445,10 +454,24 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
           <forkCount>${testsThreadCount}</forkCount>
-          <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
+          <reuseForks>false</reuseForks>
+          <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
           <systemPropertyVariables>
             <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+            <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
             <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+
+            <!-- This is intentionally the same directory for all JUnit -->
+            <!-- forks, for use in the very rare situation that -->
+            <!-- concurrent tests need to coordinate, such as using lock -->
+            <!-- files. -->
+            <test.build.shared.data>${test.build.data}</test.build.shared.data>
+
+            <!-- Due to a Maven quirk, setting this to just -->
+            <!-- surefire.forkNumber won't do the parameter substitution. -->
+            <!-- Putting a prefix in front of it like "fork-" makes it -->
+            <!-- work. -->
+            <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
           </systemPropertyVariables>
         </configuration>
       </plugin>
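The surefire settings above give every forked JVM its own data and temp directories via system properties. A minimal, self-contained sketch of how a test might consume them; the class name and fallback paths are illustrative, not part of the patch:

import java.io.File;

public class PerForkDirsSketch {
  public static void main(String[] args) {
    // Each surefire fork sees its own substituted value, e.g. ".../test/data/3".
    String dataDir = System.getProperty("test.build.data", "target/test/data");
    // "fork-<n>" is unique per fork; handy for names that must not collide.
    String forkId = System.getProperty("test.unique.fork.id", "fork-1");
    File scratch = new File(dataDir, forkId);
    if (scratch.mkdirs() || scratch.isDirectory()) {
      System.out.println("per-fork scratch dir: " + scratch.getAbsolutePath());
    }
  }
}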
@@ -57,8 +57,6 @@ public class TestSWebHdfsFileContextMainOperations
   protected static final byte[] data = getFileData(numBlocks,
       getDefaultBlockSize());

-  private static Configuration sslConf;
-
   @BeforeClass
   public static void clusterSetupAtBeginning()
       throws IOException, LoginException, URISyntaxException {
|
@ -67,15 +65,18 @@ public class TestSWebHdfsFileContextMainOperations
|
||||||
FileUtil.fullyDelete(base);
|
FileUtil.fullyDelete(base);
|
||||||
base.mkdirs();
|
base.mkdirs();
|
||||||
keystoresDir = new File(BASEDIR).getAbsolutePath();
|
keystoresDir = new File(BASEDIR).getAbsolutePath();
|
||||||
sslConf = new Configuration();
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
sslConfDir = KeyStoreTestUtil
|
sslConfDir = KeyStoreTestUtil
|
||||||
.getClasspathDir(TestSWebHdfsFileContextMainOperations.class);
|
.getClasspathDir(TestSWebHdfsFileContextMainOperations.class);
|
||||||
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, sslConf, false);
|
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, CONF, false);
|
||||||
|
CONF.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
|
||||||
|
KeyStoreTestUtil.getClientSSLConfigFileName());
|
||||||
|
CONF.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
|
||||||
|
KeyStoreTestUtil.getServerSSLConfigFileName());
|
||||||
} catch (Exception ex) {
|
} catch (Exception ex) {
|
||||||
throw new RuntimeException(ex);
|
throw new RuntimeException(ex);
|
||||||
}
|
}
|
||||||
|
|
||||||
CONF.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTPS_ONLY");
|
CONF.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTPS_ONLY");
|
||||||
CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
|
CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
|
||||||
CONF.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
|
CONF.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
|
||||||
|
|
|
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
@@ -27,6 +28,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static org.junit.Assert.*;

@@ -129,6 +131,10 @@ public abstract class SaslDataTransferTestCase {
     String keystoresDir = baseDir.getAbsolutePath();
     String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
     KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    conf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
     return conf;
   }
 }
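Several hunks in this commit repeat one pattern: generate test keystores, then register per-JVM ssl-client.xml / ssl-server.xml resource names so concurrent forks do not overwrite each other's SSL config. A condensed sketch of that pattern, assuming a throwaway test class and keystore directory (both illustrative):

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;

public class SslTestConfSketch {
  static Configuration createHttpsTestConf() throws Exception {
    Configuration conf = new Configuration();
    String keystoresDir = new File("target/test/data").getAbsolutePath();
    String sslConfDir = KeyStoreTestUtil.getClasspathDir(SslTestConfSketch.class);
    // Writes test keystores and generated ssl-client/ssl-server config files
    // (named per fork when test.unique.fork.id is set).
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
    // Point the conf at this fork's generated SSL resources by name.
    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
        KeyStoreTestUtil.getClientSSLConfigFileName());
    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
        KeyStoreTestUtil.getServerSSLConfigFileName());
    return conf;
  }
}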
@@ -22,6 +22,7 @@ import static org.junit.Assert.*;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
@@ -35,6 +36,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;

 import java.io.File;
@@ -128,6 +130,10 @@ public class TestSecureNNWithQJM {
     String sslConfDir = KeyStoreTestUtil.getClasspathDir(
       TestSecureNNWithQJM.class);
     KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
+    baseConf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    baseConf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
   }

   @AfterClass
@@ -612,14 +612,8 @@ public class TestBlockScanner {

     // We scan 5 bytes per file (1 byte in file, 4 bytes of checksum)
     final int BYTES_SCANNED_PER_FILE = 5;
-    final int NUM_FILES[] = new int[] { 1, 5, 10 };
-    int TOTAL_FILES = 0;
-    for (int i = 0; i < NUM_FILES.length; i++) {
-      TOTAL_FILES += NUM_FILES[i];
-    }
-    ctx.createFiles(0, NUM_FILES[0], 1);
-    ctx.createFiles(0, NUM_FILES[1], 1);
-    ctx.createFiles(0, NUM_FILES[2], 1);
+    int TOTAL_FILES = 16;
+    ctx.createFiles(0, TOTAL_FILES, 1);

     // start scanning
     final TestScanResultHandler.Info info =
@@ -277,13 +277,14 @@ public class TestDataNodeMetrics {
       }

       for (int x =0; x < 50; x++) {
-        String s = DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
+        DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
       }

       MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
       long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
       long endReadValue = getLongCounter("TotalReadTime", rbNew);
+      // Lets Metric system update latest metrics
+      Thread.sleep(100);
       assertTrue(endReadValue > startReadValue);
       assertTrue(endWriteValue > startWriteValue);
     } finally {
@@ -263,6 +263,7 @@ public class TestFSNamesystem {
     }

     latch.await();
+    Thread.sleep(10); // Lets all threads get BLOCKED
     Assert.assertEquals("Expected number of blocked thread not found",
                         threadCount, rwLock.getQueueLength());
   }
@@ -74,6 +74,10 @@ public class TestNameNodeHttpServer {
     KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
     connectionFactory = URLConnectionFactory
         .newDefaultURLConnectionFactory(conf);
+    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
   }

   @AfterClass
@@ -223,6 +223,10 @@ public class TestNameNodeRespectsBindHostKeys {
     LOG.info("Testing behavior without " + DFS_NAMENODE_HTTPS_BIND_HOST_KEY);

     setupSsl();
+    conf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());

     conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -122,9 +123,11 @@ public class TestEditLogTailer {

     // Have to specify IPC ports so the NNs can talk to each other.
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));
+            .addNN(new MiniDFSNNTopology.NNConf("nn1")
+                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2")
+                .setIpcPort(ServerSocketUtil.getPort(0, 100))));

     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
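ServerSocketUtil.getPort(port, retries) replaces the fixed IPC ports above: it tries to bind the requested port (a random port when 0 is passed) and retries up to the given count before giving up, so tests running in parallel do not collide on hard-coded ports. A small usage sketch, with an illustrative class name:

import java.io.IOException;

import org.apache.hadoop.net.ServerSocketUtil;

public class FreePortSketch {
  public static void main(String[] args) throws IOException {
    // Ask for any free port, trying up to 100 candidates.
    int nn1IpcPort = ServerSocketUtil.getPort(0, 100);
    int nn2IpcPort = ServerSocketUtil.getPort(0, 100);
    System.out.println("nn1 IPC=" + nn1IpcPort + ", nn2 IPC=" + nn2IpcPort);
  }
}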
@@ -146,13 +149,17 @@ public class TestEditLogTailer {
       long startTxId) throws Exception {
     URI sharedUri = cluster.getSharedEditsDir(0, 1);
     File sharedDir = new File(sharedUri.getPath(), "current");
-    final File expectedLog = new File(sharedDir,
-        NNStorage.getInProgressEditsFileName(startTxId));
+    final File expectedInProgressLog =
+        new File(sharedDir, NNStorage.getInProgressEditsFileName(startTxId));
+    final File expectedFinalizedLog = new File(sharedDir,
+        NNStorage.getFinalizedEditsFileName(startTxId, startTxId + 1));
+    // There is a chance that multiple rolling happens by multiple NameNodes
+    // And expected inprogress file would have also finalized. So look for the
+    // finalized edits file as well
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        return expectedLog.exists();
+        return expectedInProgressLog.exists() || expectedFinalizedLog.exists();
       }
     }, 100, 10000);
   }
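The edits-file names waited on above come from NNStorage's static helpers, which produce the canonical in-progress and finalized segment names for a transaction range. A tiny sketch that just prints both forms for an assumed start transaction id:

import org.apache.hadoop.hdfs.server.namenode.NNStorage;

public class EditsFileNameSketch {
  public static void main(String[] args) {
    long startTxId = 1; // assumed value for illustration
    // Name of the segment while it is still being written.
    System.out.println(NNStorage.getInProgressEditsFileName(startTxId));
    // Name of the same segment once it has been finalized after a roll.
    System.out.println(
        NNStorage.getFinalizedEditsFileName(startTxId, startTxId + 1));
  }
}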
@@ -123,7 +123,7 @@ public class TestHAAppend {

     if (!isTruncateReady) {
       TestFileTruncate.checkBlockRecovery(fileToTruncate,
-          cluster.getFileSystem(1));
+          cluster.getFileSystem(1), 300, 200);
     }
     AppendTestUtil.checkFullFile(fs, fileToTruncate, truncatePos[0], data,
         fileToTruncate.toString());
@@ -65,6 +65,10 @@ public class TestHttpsFileSystem {
     sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHttpsFileSystem.class);

     KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());

     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
@@ -216,7 +216,11 @@ public class TestWebHdfsTokens {
     keystoresDir = new File(BASEDIR).getAbsolutePath();
     sslConfDir = KeyStoreTestUtil.getClasspathDir(TestWebHdfsTokens.class);
     KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, clusterConf, false);
+    clusterConf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    clusterConf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());

     // trick the NN into thinking security is enabled w/o it trying
     // to login from a keytab
     UserGroupInformation.setConfiguration(clusterConf);
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.web.oauth2;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.util.Timer;
 import org.apache.http.HttpStatus;
 import org.codehaus.jackson.map.ObjectMapper;
|
@ -74,7 +75,7 @@ public class TestClientCredentialTimeBasedTokenRefresher {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void refreshUrlIsCorrect() throws IOException {
|
public void refreshUrlIsCorrect() throws IOException {
|
||||||
final int PORT = 7552;
|
final int PORT = ServerSocketUtil.getPort(0, 20);
|
||||||
final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
|
final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
|
||||||
|
|
||||||
long tokenExpires = 0;
|
long tokenExpires = 0;
|
||||||
|
|