diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 99122f42bc6..346d23c9317 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -283,6 +283,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6227. ShortCircuitCache#unref should purge ShortCircuitReplicas whose
     streams have been closed by java interrupts. (Colin Patrick McCabe via
     jing9)
 
+    HDFS-6442. Fix TestEditLogAutoroll and TestStandbyCheckpoints failure
+    caused by port conflicts. (Zesheng Wu via Arpit Agarwal)
+
     HDFS-6448. BlockReaderLocalLegacy should set socket timeout based on
     conf.socketTimeout (liangxie via cmccabe)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
index bea5ee4e567..3a0ae5c5f2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.net.BindException;
+import java.util.Random;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,6 +48,9 @@ public class TestEditLogAutoroll {
   private NameNode nn0;
   private FileSystem fs;
   private FSEditLog editLog;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestEditLog.class);
 
   @Before
   public void setUp() throws Exception {
@@ -54,24 +62,35 @@ public class TestEditLogAutoroll {
     conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
     conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
+        cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topology)
+          .numDataNodes(0)
+          .build();
+        cluster.waitActive();
 
-    nn0 = cluster.getNameNode(0);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
+        nn0 = cluster.getNameNode(0);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
 
-    cluster.transitionToActive(0);
+        cluster.transitionToActive(0);
 
-    fs = cluster.getFileSystem(0);
-    editLog = nn0.getNamesystem().getEditLog();
+        fs = cluster.getFileSystem(0);
+        editLog = nn0.getNamesystem().getEditLog();
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 2b5e89af8b8..fd579026e78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -25,12 +25,14 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.BindException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.net.URI;
 import java.net.URL;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -73,6 +75,7 @@ public class TestStandbyCheckpoints {
   protected MiniDFSCluster cluster;
   protected NameNode nn0, nn1;
   protected FileSystem fs;
+  private final Random random = new Random();
   protected File tmpOivImgDir;
 
   private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
@@ -87,22 +90,33 @@ public class TestStandbyCheckpoints {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
-
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
-
-    nn0 = cluster.getNameNode(0);
-    nn1 = cluster.getNameNode(1);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-    cluster.transitionToActive(0);
+        cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topology)
+          .numDataNodes(0)
+          .build();
+        cluster.waitActive();
+
+        nn0 = cluster.getNameNode(0);
+        nn1 = cluster.getNameNode(1);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+        cluster.transitionToActive(0);
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   protected Configuration setupCommonConfig() {
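
Note for reviewers: both test setups stop hard-coding the NameNode HTTP ports 10061/10062 and instead pick a random even base port in the range 10060-10258 (10060 + random.nextInt(100) * 2), retrying the whole MiniDFSCluster setup whenever a BindException signals a port conflict. The sketch below illustrates that retry-on-BindException pattern in isolation, binding a plain ServerSocket instead of starting a MiniDFSCluster; the class and method names are illustrative only and are not part of the patch.

import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.util.Random;

/**
 * Minimal sketch of the retry pattern used in the patch: choose a random
 * port in a fixed range, try to bind, and retry on BindException.
 * Hypothetical example class, not part of HDFS-6442.
 */
public class RandomPortRetryExample {
  private static final Random RANDOM = new Random();

  /** Keeps trying random even ports in [10060, 10258] until one binds. */
  public static ServerSocket bindToFreePort() throws IOException {
    int retryCount = 0;
    while (true) {
      // Same port range as the patch: 10060 + {0, 2, 4, ..., 198}.
      int basePort = 10060 + RANDOM.nextInt(100) * 2;
      try {
        ServerSocket socket = new ServerSocket();
        socket.bind(new InetSocketAddress("127.0.0.1", basePort));
        return socket; // success: the port was free
      } catch (BindException e) {
        // Port already in use; log and try another random port.
        retryCount++;
        System.out.println("Port " + basePort + " busy, retry #" + retryCount);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    try (ServerSocket socket = bindToFreePort()) {
      System.out.println("Bound to port " + socket.getLocalPort());
    }
  }
}

Like the patch, this loop retries indefinitely; capping the number of retries would be a reasonable hardening if the port range could ever be exhausted.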