HDFS-9444. Add utility to find set of available ephemeral ports to ServerSocketUtil. Contributed by Masatake Iwasaki
(cherry picked from commit e9a34ae29c)
(cherry picked from commit 5f754e8638)
parent 4aea760676
commit 1e5c40b5b4

@@ -60,4 +60,26 @@ public class ServerSocketUtil {
       }
     }
   }
+
+  /**
+   * Find the specified number of unique ports available.
+   * The ports are all closed afterwards,
+   * so other network services started may grab those same ports.
+   *
+   * @param numPorts number of required port numbers
+   * @return array of available port numbers
+   * @throws IOException
+   */
+  public static int[] getPorts(int numPorts) throws IOException {
+    ServerSocket[] sockets = new ServerSocket[numPorts];
+    int[] ports = new int[numPorts];
+    for (int i = 0; i < numPorts; i++) {
+      ServerSocket sock = new ServerSocket(0);
+      sockets[i] = sock;
+      ports[i] = sock.getLocalPort();
+    }
+    for (ServerSocket sock : sockets) {
+      sock.close();
+    }
+    return ports;
+  }
 }
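Editorial note, not part of the commit: getPorts opens numPorts ephemeral server sockets, records their local port numbers, and closes them all before returning, so the returned ports are likely free but not reserved. A minimal usage sketch under that assumption (the class name PortsExample is invented for illustration):

import java.io.IOException;

import org.apache.hadoop.net.ServerSocketUtil;

public class PortsExample {
  public static void main(String[] args) throws IOException {
    // Ask for two distinct ports that were free a moment ago. getPorts()
    // has already closed its probe sockets, so another process can still
    // grab them before we bind; callers should be ready to retry.
    int[] ports = ServerSocketUtil.getPorts(2);
    System.out.println("got ports " + ports[0] + " and " + ports[1]);
  }
}
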
@@ -169,6 +169,9 @@ Release 2.7.4 - UNRELEASED
     HDFS-10301. Remove FBR tracking state to fix false zombie storage
     detection for interleaving block reports. (Vinitha Gankidi via shv)
 
+    HDFS-9444. Add utility to find set of available ephemeral ports to
+    ServerSocketUtil. (Masatake Iwasaki via Brahma Reddy Battula)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES
@@ -19,8 +19,11 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.junit.Assert.assertTrue;
 
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
+import java.net.BindException;
 import java.net.URI;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -37,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -120,17 +124,32 @@ public class TestEditLogTailer {
     // Roll every 1s
     conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
 
-    // Have to specify IPC ports so the NNs can talk to each other.
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));
-
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
+    MiniDFSCluster cluster = null;
+    for (int i = 0; i < 5; i++) {
+      try {
+        // Have to specify IPC ports so the NNs can talk to each other.
+        int[] ports = ServerSocketUtil.getPorts(2);
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1")
+              .setIpcPort(ports[0]))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2")
+              .setIpcPort(ports[1])));
+
+        cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topology)
+          .numDataNodes(0)
+          .build();
+        break;
+      } catch (BindException e) {
+        // retry if race on ports given by ServerSocketUtil#getPorts
+        continue;
+      }
+    }
+    if (cluster == null) {
+      fail("failed to start mini cluster.");
+    }
     try {
       cluster.transitionToActive(activeIndex);
       waitForLogRollInSharedDir(cluster, 3);
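Editorial note, not part of the diff: the loop above retries because the ports handed out by getPorts() are closed again before MiniDFSCluster binds them, so a concurrent process can win the race and cause a BindException. A self-contained sketch of the same allocate-bind-retry pattern, using a hypothetical RetryingBind helper in place of the MiniDFSCluster builder chain:

import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

import org.apache.hadoop.net.ServerSocketUtil;

public class RetryingBind {
  public static ServerSocket bindWithRetries(int attempts) throws IOException {
    for (int i = 0; i < attempts; i++) {
      // The port was free when getPorts() probed it, but the probe socket
      // is already closed, so this bind can still lose a race.
      int port = ServerSocketUtil.getPorts(1)[0];
      try {
        ServerSocket socket = new ServerSocket();
        socket.bind(new InetSocketAddress(port));
        return socket;
      } catch (BindException e) {
        // Lost the race; ask for a fresh port and try again.
      }
    }
    throw new IOException("no free port after " + attempts + " attempts");
  }
}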