diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
index 023c1ed52fd..a294e745ee0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -102,4 +102,27 @@ public static int waitForPort(int port, int retries)
       }
     }
   }
+
+  /**
+   * Find the specified number of unique available ports.
+   * The ports are all closed before returning,
+   * so other network services started afterwards may grab those same ports.
+   *
+   * @param numPorts number of required ports
+   * @return array of available port numbers
+   * @throws IOException
+   */
+  public static int[] getPorts(int numPorts) throws IOException {
+    ServerSocket[] sockets = new ServerSocket[numPorts];
+    int[] ports = new int[numPorts];
+    for (int i = 0; i < numPorts; i++) {
+      ServerSocket sock = new ServerSocket(0);
+      sockets[i] = sock;
+      ports[i] = sock.getLocalPort();
+    }
+    for (ServerSocket sock : sockets) {
+      sock.close();
+    }
+    return ports;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index dc8bea75db6..ac97a3678f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -47,6 +47,7 @@
 import javax.management.ObjectName;
 import java.io.File;
 import java.lang.management.ManagementFactory;
+import java.net.BindException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -59,6 +60,7 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * Class for testing {@link NameNodeMXBean} implementation
@@ -431,17 +433,29 @@ public void testQueueLength() throws Exception {
   public void testNNDirectorySize() throws Exception{
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-    // Have to specify IPC ports so the NNs can talk to each other.
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100))));
+    MiniDFSCluster cluster = null;
+    for (int i = 0; i < 5; i++) {
+      try {
+        // Have to specify IPC ports so the NNs can talk to each other.
+        int[] ports = ServerSocketUtil.getPorts(2);
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+                .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ports[0]))
+                .addNN(
+                    new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1])));
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(topology).numDataNodes(0)
-        .build();
+        cluster = new MiniDFSCluster.Builder(conf)
+            .nnTopology(topology).numDataNodes(0)
+            .build();
+        break;
+      } catch (BindException e) {
+        // retry if race on ports given by ServerSocketUtil#getPorts
+        continue;
+      }
+    }
+    if (cluster == null) {
+      fail("failed to start mini cluster.");
+    }
     FileSystem fs = null;
     try {
       cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index 0d0873d730a..5a2aff90a7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -24,6 +24,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.net.BindException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -165,21 +166,34 @@ private static void testStandbyTriggersLogRolls(int activeIndex)
     conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
-
-    // Have to specify IPC ports so the NNs can talk to each other.
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
-            .addNN(new MiniDFSNNTopology.NNConf("nn3")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100))));
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(topology)
-        .numDataNodes(0)
-        .build();
+    MiniDFSCluster cluster = null;
+    for (int i = 0; i < 5; i++) {
+      try {
+        // Have to specify IPC ports so the NNs can talk to each other.
+        int[] ports = ServerSocketUtil.getPorts(3);
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+                .addNN(new MiniDFSNNTopology.NNConf("nn1")
+                    .setIpcPort(ports[0]))
+                .addNN(new MiniDFSNNTopology.NNConf("nn2")
+                    .setIpcPort(ports[1]))
+                .addNN(new MiniDFSNNTopology.NNConf("nn3")
+                    .setIpcPort(ports[2])));
+
+        cluster = new MiniDFSCluster.Builder(conf)
+            .nnTopology(topology)
+            .numDataNodes(0)
+            .build();
+        break;
+      } catch (BindException e) {
+        // retry if race on ports given by ServerSocketUtil#getPorts
+        continue;
+      }
+    }
+    if (cluster == null) {
+      fail("failed to start mini cluster.");
+    }
     try {
       cluster.transitionToActive(activeIndex);
       waitForLogRollInSharedDir(cluster, 3);
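Reviewer note (illustration only, not part of the patch): the new helper closes every socket it reserved before returning, so the port numbers it hands back can be re-taken by another process before MiniDFSCluster binds them. That is why both tests wrap cluster startup in a bounded retry on BindException instead of treating the first bind failure as fatal. Below is a minimal, self-contained sketch of the same reserve-then-retry pattern using only the JDK; the class and method names are invented for illustration.

import java.io.IOException;
import java.net.BindException;
import java.net.ServerSocket;

/** Hypothetical standalone sketch of the reserve-then-retry pattern used by the patch. */
public class PortRaceRetryExample {

  /** Same idea as the new ServerSocketUtil#getPorts: bind ephemeral ports, record them, close them. */
  static int[] reservePorts(int numPorts) throws IOException {
    ServerSocket[] sockets = new ServerSocket[numPorts];
    int[] ports = new int[numPorts];
    for (int i = 0; i < numPorts; i++) {
      ServerSocket sock = new ServerSocket(0); // port 0 asks the OS for any free port
      sockets[i] = sock;
      ports[i] = sock.getLocalPort();
    }
    for (ServerSocket sock : sockets) {
      sock.close(); // the ports are free again from here on, so another process may grab them
    }
    return ports;
  }

  public static void main(String[] args) throws IOException {
    ServerSocket server = null;
    // Bounded retry, mirroring the five-attempt loops added to the tests.
    for (int attempt = 0; attempt < 5 && server == null; attempt++) {
      int[] ports = reservePorts(2);
      try {
        server = new ServerSocket(ports[0]); // stand-in for MiniDFSCluster binding an NN IPC port
      } catch (BindException e) {
        // lost the race for ports[0]; loop around and reserve fresh ports
      }
    }
    if (server == null) {
      throw new IllegalStateException("failed to bind after retries");
    }
    System.out.println("bound to port " + server.getLocalPort());
    server.close();
  }
}

The sketch only demonstrates why the retry loop is needed: the reservation cannot hold the ports it returns, so a BindException at cluster startup is an expected race rather than a test failure.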