diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 4f74c7a5d90..8ccbd483335 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -345,7 +346,12 @@ public class TestBlockTokenWithDFS {
     Configuration conf = getConf(numDataNodes);
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
+      // prefer non-ephemeral port to avoid port collision on restartNameNode
+      cluster = new MiniDFSCluster.Builder(conf)
+          .nameNodePort(ServerSocketUtil.getPort(19820, 100))
+          .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
+          .numDataNodes(numDataNodes)
+          .build();
       cluster.waitActive();
       assertEquals(numDataNodes, cluster.getDataNodes().size());
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
index dfdcf3483c3..bbb787e1d75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.MockNameNodeResourceChecker;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
@@ -75,14 +76,21 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
-
-    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10023);
-    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10024);
+    // Get random port numbers in advance. Because ZKFCs and DFSHAAdmin
+    // need the RPC port numbers of all ZKFCs, setting 0 does not work here.
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1",
+        ServerSocketUtil.getPort(10023, 100));
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",
+        ServerSocketUtil.getPort(10024, 100));
+
+    // prefer non-ephemeral port to avoid port collision on restartNameNode
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
+            .addNN(new MiniDFSNNTopology.NNConf("nn1")
+                .setIpcPort(ServerSocketUtil.getPort(10021, 100)))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2")
+                .setIpcPort(ServerSocketUtil.getPort(10022, 100))));
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
         .numDataNodes(0)
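For context, a minimal standalone sketch (not part of the patch) of the ServerSocketUtil.getPort(preferred, retries) pattern the changes above rely on: the call checks whether the preferred port can be bound, returns it if free, and otherwise retries with another port (up to the given number of attempts). This lets the tests write concrete, known port numbers into the Configuration before any daemon starts, instead of passing 0 and discovering an ephemeral port afterwards, which is what caused collisions on restartNameNode. The class name and printed output below are illustrative only.

import java.io.IOException;

import org.apache.hadoop.net.ServerSocketUtil;

// Illustrative sketch (hypothetical class name), not part of the patch.
public class PreferredPortSketch {
  public static void main(String[] args) throws IOException {
    // Try the preferred ports first; if one is already bound, getPort
    // retries (here up to 100 times) with another port instead of failing.
    int nnRpcPort = ServerSocketUtil.getPort(19820, 100);
    int nnHttpPort = ServerSocketUtil.getPort(19870, 100);

    System.out.println("NameNode RPC port:  " + nnRpcPort);
    System.out.println("NameNode HTTP port: " + nnHttpPort);
  }
}

The same reasoning explains the ZKFC comment in the patch: DFSHAAdmin and each ZKFC must know every ZKFC's RPC port up front, so the ports have to be resolved to real numbers before the cluster is built, and port 0 cannot be used.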