diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 23b22727e4a..3c529123804 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -2261,16 +2261,8 @@ function check_unittests
test_logfile=${PATCH_DIR}/testrun_${module_suffix}.txt
echo " Running tests in ${module_suffix}"
- # Temporary hack to run the parallel tests profile only for hadoop-common.
- # This code will be removed once hadoop-hdfs is ready for parallel test
- # execution.
- if [[ ${module} == "hadoop-common-project/hadoop-common" ]] ; then
- OPTIONAL_PARALLEL_TESTS_PROFILE=${PARALLEL_TESTS_PROFILE}
- else
- unset OPTIONAL_PARALLEL_TESTS_PROFILE
- fi
# shellcheck disable=2086
- echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} ${OPTIONAL_PARALLEL_TESTS_PROFILE} ${TESTS_THREAD_COUNT} -D${PROJECT_NAME}PatchProcess
+ echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} ${PARALLEL_TESTS_PROFILE} ${TESTS_THREAD_COUNT} -D${PROJECT_NAME}PatchProcess
test_build_result=$?
add_jira_footer "${module_suffix} test log" "@@BASE@@/testrun_${module_suffix}.txt"
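
With the temporary hadoop-common-only guard removed, test-patch.sh passes
${PARALLEL_TESTS_PROFILE} for every module, so HDFS pre-commit now runs its
unit tests in parallel as well. Assuming the profile id and property used
elsewhere in Hadoop, the equivalent local run is roughly
"mvn clean install -fae -Pparallel-tests -DtestsThreadCount=4".
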
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 89fe7f4fe11..65eab1931e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1541,6 +1541,9 @@ Release 2.8.0 - UNRELEASED
DataTransferProtocol sockets and allow configuring auto-tuning (He Tianyi
via Colin P. McCabe)
+ HDFS-9139. Enable parallel JUnit tests for HDFS Pre-commit
+ (Chris Nauroth and Vinayakumar B via vinayakumarb)
+
BUG FIXES
HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 016c2bfbd5c..f720d0b3476 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -503,14 +503,23 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>maven-antrun-plugin</artifactId>
-            <id>create-parallel-tests-dirs</id>
+            <id>create-parallel-tests-dirs</id>
             <phase>test-compile</phase>
-
-
-
-
+
@@ -524,10 +533,24 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
           <forkCount>${testsThreadCount}</forkCount>
-          <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
+          <reuseForks>false</reuseForks>
+          <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
           <systemPropertyVariables>
             <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+            <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
             <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+
+            <!-- This is intentionally the same directory for all JUnit -->
+            <!-- forks, for use in the very rare situation that -->
+            <!-- concurrent tests need to coordinate, such as using lock -->
+            <!-- files. -->
+            <test.build.shared.data>${test.build.data}</test.build.shared.data>
+
+            <!-- Due to a Maven quirk, setting this to just -->
+            <!-- surefire.forkNumber won't do the parameter substitution. -->
+            <!-- Putting a prefix in front of it like "fork-" makes it -->
+            <!-- work. -->
+            <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
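
Each surefire fork now sees fork-private values for test.build.data,
test.build.dir, and hadoop.tmp.dir, so MiniDFSCluster instances in different
forks cannot collide on disk. A minimal sketch of what a test sees under this
profile (property names taken from the configuration above; the fallback
defaults are assumptions):

    // Sketch: resolving per-fork test directories inside a forked JVM.
    import java.io.File;

    public class ForkDirsSketch {
      public static void main(String[] args) {
        // Under -Pparallel-tests, surefire rewrites this property to
        // ${test.build.data}/${surefire.forkNumber}, e.g. .../test/data/3.
        String data = System.getProperty("test.build.data", "build/test/data");
        // test.build.shared.data intentionally stays identical across forks,
        // for the rare case where concurrent tests coordinate via lock files.
        String shared = System.getProperty("test.build.shared.data", data);
        System.out.println(new File(data) + " (private) / "
            + new File(shared) + " (shared)");
      }
    }
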
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
index 53513fd1504..02110ac7031 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
@@ -57,8 +57,6 @@ public class TestSWebHdfsFileContextMainOperations
protected static final byte[] data = getFileData(numBlocks,
getDefaultBlockSize());
- private static Configuration sslConf;
-
@BeforeClass
public static void clusterSetupAtBeginning()
throws IOException, LoginException, URISyntaxException {
@@ -67,15 +65,18 @@ public static void clusterSetupAtBeginning()
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
- sslConf = new Configuration();
-
try {
sslConfDir = KeyStoreTestUtil
.getClasspathDir(TestSWebHdfsFileContextMainOperations.class);
- KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, sslConf, false);
+ KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, CONF, false);
+ CONF.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getClientSSLConfigFileName());
+ CONF.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getServerSSLConfigFileName());
} catch (Exception ex) {
throw new RuntimeException(ex);
}
+
CONF.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTPS_ONLY");
CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
CONF.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
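
Before this change the suite configured SSL in a throwaway sslConf object
while the cluster still loaded the fixed classpath resources ssl-client.xml
and ssl-server.xml, which concurrent forks would overwrite. Writing the setup
into CONF and registering the file names returned by KeyStoreTestUtil lets
each fork load its own copy; the same registration is repeated in the other
secure test suites below. A sketch of the naming scheme this relies on,
assuming the helpers derive a suffix from the test.unique.fork.id property
set in the pom profile (illustrative, not the actual implementation):

    // Illustrative: deriving a per-fork SSL config resource name.
    static String sslConfigFileName(String base) {
      String forkId = System.getProperty("test.unique.fork.id"); // e.g. "fork-3"
      String suffix = (forkId == null) ? "" : "-" + forkId;
      return base + suffix + ".xml"; // -> ssl-client-fork-3.xml
    }
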
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
index 9e660b23d13..37b2a2c0606 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
@@ -20,6 +20,7 @@
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
@@ -27,6 +28,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
import static org.junit.Assert.*;
@@ -129,6 +131,10 @@ protected HdfsConfiguration createSecureConfig(
String keystoresDir = baseDir.getAbsolutePath();
String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+ conf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getClientSSLConfigFileName());
+ conf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getServerSSLConfigFileName());
return conf;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
index b40e52a0f30..f95594a81ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
@@ -22,6 +22,7 @@
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
@@ -35,6 +36,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
import java.io.File;
@@ -128,6 +130,10 @@ public static void init() throws Exception {
String sslConfDir = KeyStoreTestUtil.getClasspathDir(
TestSecureNNWithQJM.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
+ baseConf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getClientSSLConfigFileName());
+ baseConf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getServerSSLConfigFileName());
}
@AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
index b01597a4deb..acd5a476f9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
@@ -612,14 +612,8 @@ public void testMultipleBlockPoolScanning() throws Exception {
// We scan 5 bytes per file (1 byte in file, 4 bytes of checksum)
final int BYTES_SCANNED_PER_FILE = 5;
- final int NUM_FILES[] = new int[] { 1, 5, 10 };
- int TOTAL_FILES = 0;
- for (int i = 0; i < NUM_FILES.length; i++) {
- TOTAL_FILES += NUM_FILES[i];
- }
- ctx.createFiles(0, NUM_FILES[0], 1);
- ctx.createFiles(0, NUM_FILES[1], 1);
- ctx.createFiles(0, NUM_FILES[2], 1);
+ int TOTAL_FILES = 16;
+ ctx.createFiles(0, TOTAL_FILES, 1);
// start scanning
final TestScanResultHandler.Info info =
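
The removed loop summed NUM_FILES to 1 + 5 + 10 = 16, and all three
createFiles calls already passed the same first argument, so creating
TOTAL_FILES = 16 files in a single call preserves the scanner workload while
dropping the array and loop.
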
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 0b473442acc..5c0a9712b17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -277,13 +277,14 @@ public void testDataNodeTimeSpend() throws Exception {
}
for (int x =0; x < 50; x++) {
- String s = DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
+ DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
}
+ // Give the metrics system a moment to publish the latest counter values
+ Thread.sleep(100);
MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
long endReadValue = getLongCounter("TotalReadTime", rbNew);
-
assertTrue(endReadValue > startReadValue);
assertTrue(endWriteValue > startWriteValue);
} finally {
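
The TotalReadTime and TotalWriteTime counters are published asynchronously,
so the end values are sampled only after a short pause. The fixed
Thread.sleep(100) is the simplest fix; polling the counters with
GenericTestUtils.waitFor (as sketched for TestFSNamesystem below) would be
more robust on a heavily loaded machine.
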
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index af9a6f78120..d533cf6d5b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -263,6 +263,7 @@ public void run() {
}
latch.await();
+ Thread.sleep(10); // Let all threads reach the BLOCKED state
Assert.assertEquals("Expected number of blocked thread not found",
threadCount, rwLock.getQueueLength());
}
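
latch.await() only guarantees that every thread has called countDown(), which
happens just before the thread tries to take the lock, so there is a brief
window in which rwLock.getQueueLength() is still below threadCount; the added
sleep papers over that window. A poll states the intent directly and
tolerates slow machines. A minimal sketch, reusing the test's rwLock and
threadCount and the com.google.common.base.Supplier used elsewhere in these
tests (waitFor throws TimeoutException if the condition never holds):

    // Poll until every worker thread is queued on the lock, instead of
    // sleeping for a fixed 10 ms and hoping that is long enough.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return rwLock.getQueueLength() == threadCount;
      }
    }, 10, 1000);
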
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
index 975a6649e60..7dc719e564a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
@@ -74,6 +74,10 @@ public static void setUp() throws Exception {
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
+ conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getClientSSLConfigFileName());
+ conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getServerSSLConfigFileName());
}
@AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
index dca777f77fa..b284ed9f18b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
@@ -221,6 +221,10 @@ public void testHttpsBindHostKey() throws Exception {
LOG.info("Testing behavior without " + DFS_NAMENODE_HTTPS_BIND_HOST_KEY);
setupSsl();
+ conf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getClientSSLConfigFileName());
+ conf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getServerSSLConfigFileName());
conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index feec7c88f26..3b609b0f4d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -127,10 +128,13 @@ private static void testStandbyTriggersLogRolls(int activeIndex)
// Have to specify IPC ports so the NNs can talk to each other.
MiniDFSNNTopology topology = new MiniDFSNNTopology()
- .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
- .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
- .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032))
- .addNN(new MiniDFSNNTopology.NNConf("nn3").setIpcPort(10033)));
+ .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+ .addNN(new MiniDFSNNTopology.NNConf("nn1")
+ .setIpcPort(ServerSocketUtil.getPort(0, 100)))
+ .addNN(new MiniDFSNNTopology.NNConf("nn2")
+ .setIpcPort(ServerSocketUtil.getPort(0, 100)))
+ .addNN(new MiniDFSNNTopology.NNConf("nn3")
+ .setIpcPort(ServerSocketUtil.getPort(0, 100))));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
@@ -152,13 +156,17 @@ private static void waitForLogRollInSharedDir(MiniDFSCluster cluster,
long startTxId) throws Exception {
URI sharedUri = cluster.getSharedEditsDir(0, 2);
File sharedDir = new File(sharedUri.getPath(), "current");
- final File expectedLog = new File(sharedDir,
- NNStorage.getInProgressEditsFileName(startTxId));
-
+ final File expectedInProgressLog =
+ new File(sharedDir, NNStorage.getInProgressEditsFileName(startTxId));
+ final File expectedFinalizedLog = new File(sharedDir,
+ NNStorage.getFinalizedEditsFileName(startTxId, startTxId + 1));
+ // Multiple NameNodes may roll the edit log concurrently, in which case
+ // the expected in-progress file may already have been finalized. So look
+ // for the finalized edits file as well.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
- return expectedLog.exists();
+ return expectedInProgressLog.exists() || expectedFinalizedLog.exists();
}
}, 100, 10000);
}
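
Two changes here serve parallel runs. Hardcoded IPC ports 10031-10033 would
make two concurrent copies of this test race for the same ports, so
ServerSocketUtil.getPort(0, 100) probes for a free port, retrying up to 100
times. And with three NameNodes rolling logs, the expected in-progress
segment may already be finalized by the time the check runs, hence the second
expected file. The probe technique the port helper is built on looks roughly
like this (illustrative; the freed port can still be taken before the
NameNode binds it, which is why retries remain):

    import java.io.IOException;
    import java.net.ServerSocket;
    import java.util.Random;

    public class PortProbeSketch {
      // Bind a random candidate port to verify it is free, then release it
      // for the real server to use. A benign probe-to-bind race remains.
      static int probeFreePort(int retries) throws IOException {
        Random rand = new Random();
        for (int i = 0; i < retries; i++) {
          int candidate = 1024 + rand.nextInt(65536 - 1024);
          try (ServerSocket s = new ServerSocket(candidate)) {
            return candidate; // bound successfully, so it was free just now
          } catch (IOException busy) {
            // candidate in use; try another one
          }
        }
        throw new IOException("could not find a free port");
      }
    }
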
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
index 7b7100b0a1d..1fccb3c51fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
@@ -123,7 +123,7 @@ public void testMultipleAppendsDuringCatchupTailing() throws Exception {
if (!isTruncateReady) {
TestFileTruncate.checkBlockRecovery(fileToTruncate,
- cluster.getFileSystem(1));
+ cluster.getFileSystem(1), 300, 200);
}
AppendTestUtil.checkFullFile(fs, fileToTruncate, truncatePos[0], data,
fileToTruncate.toString());
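
Assuming the two extra arguments of checkBlockRecovery are the retry count
and the per-retry sleep in milliseconds, 300 attempts at 200 ms give block
recovery roughly 60 seconds to finish, which a loaded machine running forked
tests in parallel can need.
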
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java
index dbe80706763..bda928fd94b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java
@@ -44,7 +44,7 @@ public class TestSeveralNameNodes {
private static final Log LOG = LogFactory.getLog(TestSeveralNameNodes.class);
/** ms between failovers between NNs */
- private static final int TIME_BETWEEN_FAILOVERS = 200;
+ private static final int TIME_BETWEEN_FAILOVERS = 1000;
private static final int NUM_NAMENODES = 3;
private static final int NUM_THREADS = 3;
private static final int LIST_LENGTH = 50;
@@ -82,7 +82,7 @@ public void testCircularLinkedListWrites() throws Exception {
for (int i = 0; i < writers.size(); i++) {
CircularWriter writer = writers.get(i);
// remove the writer from the ones to check
- if (writer.done.await(10, TimeUnit.MILLISECONDS)) {
+ if (writer.done.await(100, TimeUnit.MILLISECONDS)) {
writers.remove(i--);
}
}
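
Both constants relax timing for shared CPUs: failing over every 200 ms left
the writers little room to make progress between failovers, and a 10 ms
await per writer turned the checking loop into a near busy-wait. 1000 ms
between failovers and 100 ms per await give each writer a realistic chance
to finish within the test's deadline when forks compete for cores.
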
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
index 8cee88f3086..6d541e4ddc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
@@ -62,6 +62,10 @@ public static void setUp() throws Exception {
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHttpsFileSystem.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+ conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getClientSSLConfigFileName());
+ conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getServerSSLConfigFileName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
index 69308351c00..5e8568c4421 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
@@ -214,7 +214,11 @@ public void testLazyTokenFetchForSWebhdfs() throws Exception {
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestWebHdfsTokens.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, clusterConf, false);
-
+ clusterConf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getClientSSLConfigFileName());
+ clusterConf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+ KeyStoreTestUtil.getServerSSLConfigFileName());
+
// trick the NN into thinking security is enabled w/o it trying
// to login from a keytab
UserGroupInformation.setConfiguration(clusterConf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
index c259b30aae8..d796753999c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdfs.web.oauth2;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.util.Timer;
import org.apache.http.HttpStatus;
import org.codehaus.jackson.map.ObjectMapper;
@@ -74,7 +75,7 @@ public Configuration buildConf(String credential, String tokenExpires,
@Test
public void refreshUrlIsCorrect() throws IOException {
- final int PORT = 7552;
+ final int PORT = ServerSocketUtil.getPort(0, 20);
final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
long tokenExpires = 0;
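
Same pattern as TestEditLogTailer: the fixed port 7552 becomes a probed free
port. The mock token server must then bind exactly the returned port, so the
small probe-to-bind race remains, but ServerSocketUtil.getPort(0, 20) makes
collisions between concurrent forks unlikely.
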