diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
index 023c1ed52fd..a294e745ee0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -102,4 +102,27 @@ public class ServerSocketUtil {
       }
     }
   }
+
+  /**
+   * Find the specified number of unique ports available.
+   * All ports are closed before returning,
+   * so other network services started afterwards may grab those same ports.
+   *
+   * @param numPorts number of required port numbers
+   * @return array of available port numbers
+   * @throws IOException if an I/O error occurs while opening or closing a socket
+   */
+  public static int[] getPorts(int numPorts) throws IOException {
+    ServerSocket[] sockets = new ServerSocket[numPorts];
+    int[] ports = new int[numPorts];
+    for (int i = 0; i < numPorts; i++) {
+      ServerSocket sock = new ServerSocket(0);
+      sockets[i] = sock;
+      ports[i] = sock.getLocalPort();
+    }
+    for (ServerSocket sock : sockets) {
+      sock.close();
+    }
+    return ports;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index c9b0aa15580..25250c50716 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -47,6 +47,7 @@ import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import java.io.File;
 import java.lang.management.ManagementFactory;
+import java.net.BindException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -59,6 +60,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * Class for testing {@link NameNodeMXBean} implementation
@@ -431,17 +433,29 @@ public class TestNameNodeMXBean {
   public void testNNDirectorySize() throws Exception{
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-    // Have to specify IPC ports so the NNs can talk to each other.
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100))));
+    MiniDFSCluster cluster = null;
+    for (int i = 0; i < 5; i++) {
+      try {
+        // Have to specify IPC ports so the NNs can talk to each other.
+        int[] ports = ServerSocketUtil.getPorts(2);
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+                .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ports[0]))
+                .addNN(
+                    new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1])));
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(topology).numDataNodes(0)
-        .build();
+        cluster = new MiniDFSCluster.Builder(conf)
+            .nnTopology(topology).numDataNodes(0)
+            .build();
+        break;
+      } catch (BindException e) {
+        // retry if race on ports given by ServerSocketUtil#getPorts
+        continue;
+      }
+    }
+    if (cluster == null) {
+      fail("failed to start mini cluster.");
+    }
     FileSystem fs = null;
     try {
       cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index 1d13bbe938b..dec4506a5f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -19,10 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.BindException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -156,19 +158,32 @@ public class TestEditLogTailer {
 
     // Roll every 1s
     conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-
-    // Have to specify IPC ports so the NNs can talk to each other.
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100))));
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(topology)
-        .numDataNodes(0)
-        .build();
+    MiniDFSCluster cluster = null;
+    for (int i = 0; i < 5; i++) {
+      try {
+        // Have to specify IPC ports so the NNs can talk to each other.
+        int[] ports = ServerSocketUtil.getPorts(2);
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+                .addNN(new MiniDFSNNTopology.NNConf("nn1")
+                    .setIpcPort(ports[0]))
+                .addNN(new MiniDFSNNTopology.NNConf("nn2")
+                    .setIpcPort(ports[1])));
+
+        cluster = new MiniDFSCluster.Builder(conf)
+            .nnTopology(topology)
+            .numDataNodes(0)
+            .build();
+        break;
+      } catch (BindException e) {
+        // retry if race on ports given by ServerSocketUtil#getPorts
+        continue;
+      }
+    }
+    if (cluster == null) {
+      fail("failed to start mini cluster.");
+    }
     try {
       cluster.transitionToActive(activeIndex);
       waitForLogRollInSharedDir(cluster, 3);
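
Reviewer note (not part of the patch): the same reserve-ports-and-retry-on-BindException pattern now appears verbatim in both tests. Below is a minimal, hypothetical sketch of how it could be factored into a shared helper. The class name MiniClusterPortRetry, the method startHaClusterWithRetries, and the MAX_RETRIES constant are invented for illustration; everything else uses only the APIs already referenced in the diff. The topology is rebuilt inside the loop so each attempt binds to freshly reserved ports.

// Hypothetical helper -- a sketch only, not part of this change.
import java.io.IOException;
import java.net.BindException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.net.ServerSocketUtil;

public class MiniClusterPortRetry {

  private static final int MAX_RETRIES = 5;

  /**
   * Start a two-NameNode HA mini cluster, retrying with freshly reserved
   * ports whenever another process wins the race for a port returned by
   * ServerSocketUtil#getPorts.
   */
  static MiniDFSCluster startHaClusterWithRetries(Configuration conf)
      throws IOException {
    for (int i = 0; i < MAX_RETRIES; i++) {
      // getPorts closes its probe sockets before returning, so another
      // process may still grab a port before the NameNodes bind to it.
      int[] ports = ServerSocketUtil.getPorts(2);
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ports[0]))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1])));
      try {
        return new MiniDFSCluster.Builder(conf)
            .nnTopology(topology)
            .numDataNodes(0)
            .build();
      } catch (BindException e) {
        // Lost the race for one of the reserved ports; reserve new ones and retry.
      }
    }
    throw new IOException(
        "Could not bind NameNode IPC ports after " + MAX_RETRIES + " attempts.");
  }
}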