HDFS-6768. Fix a few unit tests that use hard-coded port numbers. (Arpit Agarwal)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614732 13f79535-47bb-0310-9956-ffa450edef68
commit 18360e71f1
parent bda9c584c8
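Note on the approach (not part of the patch text): the fix replaces hard-coded NameNode ports with ephemeral ones, either by binding to port 0 and asking the running MiniDFSCluster which port it actually got, or by retrying random ports on BindException where the port must be known before startup (as in TestRefreshCallQueue, whose call-queue config key embeds the port). A minimal sketch of the port-0 pattern follows; it is an illustration assuming a plain MiniDFSCluster test environment, and the class name is hypothetical, not taken from this patch.

// Hypothetical standalone example; the real tests do this inside JUnit setup methods.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class EphemeralPortSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Port 0 lets the OS pick a free port, so concurrent test runs cannot collide.
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      // The real port is only known after startup; query it instead of hard-coding it.
      int nnPort = cluster.getNameNodePort();
      System.out.println("NameNode RPC is listening on localhost:" + nnPort);
    } finally {
      cluster.shutdown();
    }
  }
}

TestGenericRefresh follows exactly this shape; TestRefreshCallQueue cannot bind to port 0 because its "ipc.<port>.callqueue.impl" key must be set before the cluster starts, so it retries random ports instead.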
CHANGES.txt

@@ -956,6 +956,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config
     (brandonli)
 
+    HDFS-6768. Fix a few unit tests that use hard-coded port numbers. (Arpit
+    Agarwal)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
TestGenericRefresh.java

@@ -47,7 +47,6 @@ import org.mockito.Mockito;
 public class TestGenericRefresh {
   private static MiniDFSCluster cluster;
   private static Configuration config;
-  private static final int NNPort = 54222;
 
   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
@@ -57,8 +56,8 @@ public class TestGenericRefresh {
     config = new Configuration();
     config.set("hadoop.security.authorization", "true");
 
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
+    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
   }
 
@@ -103,7 +102,8 @@ public class TestGenericRefresh {
   @Test
   public void testInvalidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"};
+    String [] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "unregisteredIdentity"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
   }
@@ -111,7 +111,8 @@ public class TestGenericRefresh {
   @Test
   public void testValidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh",
+        "localhost:" + cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should succeed", 0, exitCode);
 
@@ -124,11 +125,13 @@ public class TestGenericRefresh {
   @Test
   public void testVariableArgs() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return 2", 2, exitCode);
 
-    exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"});
+    exitCode = admin.run(new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one", "two"});
     assertEquals("DFSAdmin should now return 3", 3, exitCode);
 
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
@@ -141,7 +144,8 @@ public class TestGenericRefresh {
 
     // And now this should fail
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return -1", -1, exitCode);
   }
@@ -161,7 +165,8 @@ public class TestGenericRefresh {
 
     // this should trigger both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "sharedId", "one"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
 
@@ -189,7 +194,8 @@ public class TestGenericRefresh {
 
     // We refresh both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "shared"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes
 
@@ -215,7 +221,8 @@ public class TestGenericRefresh {
     RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
 
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "exceptional"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // Exceptions result in a -1
 
TestRefreshCallQueue.java

@@ -24,6 +24,8 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.BindException;
+import java.util.Random;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -42,24 +44,42 @@ public class TestRefreshCallQueue {
   private FileSystem fs;
   static int mockQueueConstructions;
   static int mockQueuePuts;
-  private static final int NNPort = 54222;
-  private static String CALLQUEUE_CONFIG_KEY = "ipc." + NNPort + ".callqueue.impl";
+  private String callQueueConfigKey = "";
+  private final Random rand = new Random();
 
   @Before
   public void setUp() throws Exception {
     // We want to count additional events, so we reset here
     mockQueueConstructions = 0;
     mockQueuePuts = 0;
+    int portRetries = 5;
+    int nnPort;
 
-    config = new Configuration();
-    config.setClass(CALLQUEUE_CONFIG_KEY,
-        MockCallQueue.class, BlockingQueue.class);
-    config.set("hadoop.security.authorization", "true");
+    for (; portRetries > 0; --portRetries) {
+      // Pick a random port in the range [30000,60000).
+      nnPort = 30000 + rand.nextInt(30000);
+      config = new Configuration();
+      callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
+      config.setClass(callQueueConfigKey,
+          MockCallQueue.class, BlockingQueue.class);
+      config.set("hadoop.security.authorization", "true");
 
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    fs = FileSystem.get(config);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
-    cluster.waitActive();
+      FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
+      fs = FileSystem.get(config);
+
+      try {
+        cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
+        cluster.waitActive();
+        break;
+      } catch (BindException be) {
+        // Retry with a different port number.
+      }
+    }
+
+    if (portRetries == 0) {
+      // Bail if we get very unlucky with our choice of ports.
+      fail("Failed to pick an ephemeral port for the NameNode RPC server.");
+    }
   }
 
   @After
TestBlockRecovery.java

@@ -590,7 +590,6 @@ public class TestBlockRecovery {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleSingleNN(8020, 50070))
         .numDataNodes(1).build();
     try {
       cluster.waitClusterUp();