HDFS-9444. Add utility to find set of available ephemeral ports to ServerSocketUtil. Contributed by Masatake Iwasaki

(cherry picked from commit e9a34ae29c)
(cherry picked from commit 5f754e8638)
Brahma Reddy Battula 2016-09-28 10:50:50 +05:30 committed by Zhe Zhang
parent 4aea760676
commit 1e5c40b5b4
3 changed files with 54 additions and 10 deletions

ServerSocketUtil.java

@@ -60,4 +60,26 @@ public class ServerSocketUtil {
     }
   }
 
+  /**
+   * Find the specified number of unique ports available.
+   * The ports are all closed afterwards,
+   * so other network services started may grab those same ports.
+   *
+   * @param numPorts number of required port numbers
+   * @return array of available port numbers
+   * @throws IOException
+   */
+  public static int[] getPorts(int numPorts) throws IOException {
+    ServerSocket[] sockets = new ServerSocket[numPorts];
+    int[] ports = new int[numPorts];
+    for (int i = 0; i < numPorts; i++) {
+      ServerSocket sock = new ServerSocket(0);
+      sockets[i] = sock;
+      ports[i] = sock.getLocalPort();
+    }
+    for (ServerSocket sock : sockets) {
+      sock.close();
+    }
+    return ports;
+  }
 }
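The new helper can be exercised roughly as follows. This is a minimal sketch, not part of the patch, and the class name PortPickExample is made up for illustration. Because getPorts() closes its probe sockets before returning, another service may grab one of the returned ports in the meantime, so a caller is expected to retry on BindException, just as the TestEditLogTailer change further down does.

// Illustrative only -- not part of this commit.
import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

import org.apache.hadoop.net.ServerSocketUtil;

public class PortPickExample {
  public static void main(String[] args) throws IOException {
    for (int attempt = 0; attempt < 5; attempt++) {
      // Ask for two currently free ephemeral ports; they are released again
      // before being returned, so they are only a best-effort reservation.
      int[] ports = ServerSocketUtil.getPorts(2);
      try (ServerSocket a = new ServerSocket();
           ServerSocket b = new ServerSocket()) {
        a.bind(new InetSocketAddress(ports[0]));
        b.bind(new InetSocketAddress(ports[1]));
        System.out.println("bound to " + ports[0] + " and " + ports[1]);
        return;
      } catch (BindException e) {
        // Another process grabbed one of the ports; pick a fresh set and retry.
      }
    }
    throw new IOException("could not bind to ports returned by getPorts()");
  }
}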

CHANGES.txt

@@ -169,6 +169,9 @@ Release 2.7.4 - UNRELEASED
     HDFS-10301. Remove FBR tracking state to fix false zombie storage
     detection for interleaving block reports. (Vinitha Gankidi via shv)
 
+    HDFS-9444. Add utility to find set of available ephemeral ports to
+    ServerSocketUtil. (Masatake Iwasaki via Brahma Reddy Battula)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

TestEditLogTailer.java

@@ -19,8 +19,11 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.io.IOException;
+import java.net.BindException;
 import java.net.URI;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -37,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -121,16 +125,31 @@ public class TestEditLogTailer {
     conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
 
-    // Have to specify IPC ports so the NNs can talk to each other.
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
+    MiniDFSCluster cluster = null;
+    for (int i = 0; i < 5; i++) {
+      try {
+        // Have to specify IPC ports so the NNs can talk to each other.
+        int[] ports = ServerSocketUtil.getPorts(2);
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+                .addNN(new MiniDFSNNTopology.NNConf("nn1")
+                    .setIpcPort(ports[0]))
+                .addNN(new MiniDFSNNTopology.NNConf("nn2")
+                    .setIpcPort(ports[1])));
+        cluster = new MiniDFSCluster.Builder(conf)
+            .nnTopology(topology)
+            .numDataNodes(0)
+            .build();
+        break;
+      } catch (BindException e) {
+        // retry if race on ports given by ServerSocketUtil#getPorts
+        continue;
+      }
+    }
+    if (cluster == null) {
+      fail("failed to start mini cluster.");
+    }
 
     try {
       cluster.transitionToActive(activeIndex);
       waitForLogRollInSharedDir(cluster, 3);