HDFS-16522. Set Http and Ipc ports for Datanodes in MiniDFSCluster (#4108)

Signed-off-by: Akira Ajisaka <aajisaka@apache.org>
Author: Viraj Jasani (committed 2022-04-06 14:47:02 +05:30 via GitHub)
Parent: 61bbdfd3a7
Commit: 7c20602b17
8 changed files with 167 additions and 30 deletions

File: MiniDFSCluster.java

@@ -203,6 +203,8 @@ public class MiniDFSCluster implements AutoCloseable {
private int nameNodeHttpPort = 0;
private final Configuration conf;
private int numDataNodes = 1;
private int[] dnHttpPorts = null;
private int[] dnIpcPorts = null;
private StorageType[][] storageTypes = null;
private StorageType[] storageTypes1D = null;
private int storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
@@ -277,6 +279,16 @@ public class MiniDFSCluster implements AutoCloseable {
return this;
}
public Builder setDnHttpPorts(int... ports) {
this.dnHttpPorts = ports;
return this;
}
public Builder setDnIpcPorts(int... ports) {
this.dnIpcPorts = ports;
return this;
}
/**
* Default: DEFAULT_STORAGES_PER_DATANODE
*/
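For orientation, a minimal sketch (not part of the patch) of how a test might pin DataNode ports through these new builder options; the literal port values are placeholders, and a real test should pick free ports at runtime, e.g. via NetUtils.getFreeSocketPorts, to avoid bind conflicts:

    // Hypothetical usage of the new builder options; port values are illustrative only.
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .setDnHttpPorts(19864, 19865)  // one HTTP port per DataNode
        .setDnIpcPorts(19867, 19868)   // one IPC port per DataNode
        .build();
    try {
      cluster.waitActive();
      // ... assertions against the fixed ports ...
    } finally {
      cluster.shutdown();
    }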
@@ -599,7 +611,9 @@ public class MiniDFSCluster implements AutoCloseable {
builder.checkDataNodeHostConfig,
builder.dnConfOverlays,
builder.skipFsyncForTesting,
- builder.useConfiguredTopologyMappingClass);
+ builder.useConfiguredTopologyMappingClass,
+ builder.dnHttpPorts,
+ builder.dnIpcPorts);
}
public static class DataNodeProperties {
@@ -873,7 +887,7 @@ public class MiniDFSCluster implements AutoCloseable {
operation, null, racks, hosts,
null, simulatedCapacities, null, true, false,
MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0),
- true, false, false, null, true, false);
+ true, false, false, null, true, false, null, null);
}
private void initMiniDFSCluster(
@@ -891,7 +905,9 @@ public class MiniDFSCluster implements AutoCloseable {
boolean checkDataNodeHostConfig,
Configuration[] dnConfOverlays,
boolean skipFsyncForTesting,
- boolean useConfiguredTopologyMappingClass)
+ boolean useConfiguredTopologyMappingClass,
+ int[] dnHttpPorts,
+ int[] dnIpcPorts)
throws IOException {
boolean success = false;
try {
@@ -974,9 +990,9 @@ public class MiniDFSCluster implements AutoCloseable {
// Start the DataNodes
startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
- dnStartOpt != null ? dnStartOpt : startOpt,
- racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
- checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
+ dnStartOpt != null ? dnStartOpt : startOpt, racks, hosts, storageCapacities,
+ simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig,
+ dnConfOverlays, dnHttpPorts, dnIpcPorts);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@@ -1598,8 +1614,8 @@ public class MiniDFSCluster implements AutoCloseable {
String[] racks, String[] hosts,
long[] simulatedCapacities,
boolean setupHostsFile) throws IOException {
- startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
- null, simulatedCapacities, setupHostsFile, false, false, null);
+ startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts, null,
+ simulatedCapacities, setupHostsFile, false, false, null, null, null);
}
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
@@ -1608,8 +1624,8 @@ public class MiniDFSCluster implements AutoCloseable {
long[] simulatedCapacities,
boolean setupHostsFile,
boolean checkDataNodeAddrConfig) throws IOException {
- startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
- null, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
+ startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts, null,
+ simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null, null, null);
}
/**
@@ -1625,6 +1641,7 @@ public class MiniDFSCluster implements AutoCloseable {
* @param conf the base configuration to use in starting the DataNodes. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param storageTypes Storage Types for DataNodes.
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
* set in the conf
@@ -1632,13 +1649,16 @@ public class MiniDFSCluster implements AutoCloseable {
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
* @param hosts array of strings indicating the hostnames for each DataNode
+ * @param storageCapacities array of storage capacities to be used while testing
* @param simulatedCapacities array of capacities of the simulated data nodes
* @param setupHostsFile add new nodes to dfs hosts files
* @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
* @param checkDataNodeHostConfig if true, only set DataNode hostname key if not already set in config
* @param dnConfOverlays An array of {@link Configuration} objects that will overlay the
* global MiniDFSCluster Configuration for the corresponding DataNode.
- * @throws IllegalStateException if NameNode has been shutdown
+ * @param dnHttpPorts An array of HTTP ports, if present, to be used for the DataNodes
+ * @param dnIpcPorts An array of IPC ports, if present, to be used for the DataNodes
+ * @throws IOException if the DFS daemons encounter a problem
*/
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
@@ -1648,7 +1668,9 @@ public class MiniDFSCluster implements AutoCloseable {
boolean setupHostsFile,
boolean checkDataNodeAddrConfig,
boolean checkDataNodeHostConfig,
- Configuration[] dnConfOverlays) throws IOException {
+ Configuration[] dnConfOverlays,
+ int[] dnHttpPorts,
+ int[] dnIpcPorts) throws IOException {
assert storageCapacities == null || simulatedCapacities == null;
assert storageTypes == null || storageTypes.length == numDataNodes;
assert storageCapacities == null || storageCapacities.length == numDataNodes;
@@ -1656,6 +1678,19 @@ public class MiniDFSCluster implements AutoCloseable {
if (operation == StartupOption.RECOVER) {
return;
}
if (dnHttpPorts != null && dnHttpPorts.length != numDataNodes) {
throw new IllegalArgumentException(
"Num of http ports (" + dnHttpPorts.length + ") should match num of DataNodes ("
+ numDataNodes + ")");
}
if (dnIpcPorts != null && dnIpcPorts.length != numDataNodes) {
throw new IllegalArgumentException(
"Num of ipc ports (" + dnIpcPorts.length + ") should match num of DataNodes ("
+ numDataNodes + ")");
}
if (checkDataNodeHostConfig) {
conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
} else {
@@ -1711,7 +1746,15 @@ public class MiniDFSCluster implements AutoCloseable {
dnConf.addResource(dnConfOverlays[i]);
}
// Set up datanode address
- setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
+ int httpPort = 0;
+ int ipcPort = 0;
+ if (dnHttpPorts != null) {
+   httpPort = dnHttpPorts[i - curDatanodesNum];
+ }
+ if (dnIpcPorts != null) {
+   ipcPort = dnIpcPorts[i - curDatanodesNum];
+ }
+ setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig, httpPort, ipcPort);
if (manageDfsDirs) {
String dirs = makeDataNodeDirs(i, storageTypes == null ?
null : storageTypes[i - curDatanodesNum]);
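A note on the indexing above: the i - curDatanodesNum offset maps each newly started DataNode to its slot in the supplied port arrays, so if startDataNodes is invoked on a cluster that already has two DataNodes running and adds three more, dnHttpPorts[0] and dnIpcPorts[0] apply to the third DataNode overall (i == 2).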
@@ -3365,7 +3408,7 @@ public class MiniDFSCluster implements AutoCloseable {
}
protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
- boolean checkDataNodeAddrConfig) throws IOException {
+ boolean checkDataNodeAddrConfig, int httpPort, int ipcPort) throws IOException {
if (setupHostsFile) {
String hostsFile = conf.get(DFS_HOSTS, "").trim();
if (hostsFile.length() == 0) {
@@ -3388,11 +3431,11 @@ public class MiniDFSCluster implements AutoCloseable {
}
}
if (checkDataNodeAddrConfig) {
- conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
- conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+ conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + httpPort);
+ conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:" + ipcPort);
} else {
- conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
- conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+ conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + httpPort);
+ conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:" + ipcPort);
}
}
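To make the two branches concrete, a small illustrative snippet (not from the patch): with checkDataNodeAddrConfig set, setIfUnset lets an address already present in the configuration win over the requested port, whereas the else branch overrides unconditionally; a port of 0 preserves the old behavior of binding an ephemeral port.

    // Illustrative only: a caller-preset address wins under
    // checkDataNodeAddrConfig == true.
    Configuration dnConf = new HdfsConfiguration();
    dnConf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:20100"); // preset by the caller
    dnConf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:20200"); // no effect
    assert "127.0.0.1:20100".equals(dnConf.get(DFS_DATANODE_IPC_ADDRESS_KEY));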

File: MiniDFSClusterWithNodeGroup.java

@@ -118,7 +118,7 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
Configuration dnConf = new HdfsConfiguration(conf);
// Set up datanode address
- setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
+ setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig, 0, 0);
if (manageDfsDirs) {
String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
@@ -235,7 +235,9 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
boolean setupHostsFile,
boolean checkDataNodeAddrConfig,
boolean checkDataNodeHostConfig,
- Configuration[] dnConfOverlays) throws IOException {
+ Configuration[] dnConfOverlays,
+ int[] dnHttpPorts,
+ int[] dnIpcPorts) throws IOException {
startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig);
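Note that this node-group override accepts the new dnHttpPorts and dnIpcPorts parameters only to match the widened base-class signature; as the delegation above shows, they are not forwarded, and setupDatanodeAddress is likewise called with 0, 0, so node-group clusters keep binding ephemeral ports.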

File: TestMiniDFSCluster.java

@@ -19,6 +19,7 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assume.assumeTrue;
@@ -26,6 +27,7 @@ import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
@@ -38,9 +40,13 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.Preconditions;
@@ -52,6 +58,8 @@ import org.apache.hadoop.util.Preconditions;
*/
public class TestMiniDFSCluster {
private static final Logger LOG = LoggerFactory.getLogger(TestMiniDFSCluster.class);
private static final String CLUSTER_1 = "cluster1";
private static final String CLUSTER_2 = "cluster2";
private static final String CLUSTER_3 = "cluster3";
@@ -319,4 +327,88 @@ public class TestMiniDFSCluster {
cluster.restartNameNode(1);
}
}
// This test may fail if another concurrently running test binds one of the
// ports returned by NetUtils.getFreeSocketPorts(6) before the DataNodes are
// started.
@Test
public void testStartStopWithPorts() throws Exception {
Configuration conf = new Configuration();
LambdaTestUtils.intercept(
IllegalArgumentException.class,
"Num of http ports (1) should match num of DataNodes (3)",
"MiniJournalCluster port validation failed",
() -> {
new MiniDFSCluster.Builder(conf).numDataNodes(3).setDnHttpPorts(8481).build();
});
LambdaTestUtils.intercept(
IllegalArgumentException.class,
"Num of ipc ports (2) should match num of DataNodes (1)",
"MiniJournalCluster port validation failed",
() -> {
new MiniDFSCluster.Builder(conf).setDnIpcPorts(8481, 8482).build();
});
LambdaTestUtils.intercept(
IllegalArgumentException.class,
"Num of ipc ports (1) should match num of DataNodes (3)",
"MiniJournalCluster port validation failed",
() -> {
new MiniDFSCluster.Builder(conf).numDataNodes(3).setDnHttpPorts(800, 9000, 10000)
.setDnIpcPorts(8481).build();
});
LambdaTestUtils.intercept(
IllegalArgumentException.class,
"Num of http ports (4) should match num of DataNodes (3)",
"MiniJournalCluster port validation failed",
() -> {
new MiniDFSCluster.Builder(conf).setDnHttpPorts(800, 9000, 1000, 2000)
.setDnIpcPorts(8481, 8482, 8483).numDataNodes(3).build();
});
final Set<Integer> httpAndIpcPorts = NetUtils.getFreeSocketPorts(6);
LOG.info("Free socket ports: {}", httpAndIpcPorts);
assertThat(httpAndIpcPorts).doesNotContain(0);
final int[] httpPorts = new int[3];
final int[] ipcPorts = new int[3];
int httpPortIdx = 0;
int ipcPortIdx = 0;
for (Integer httpAndIpcPort : httpAndIpcPorts) {
if (httpPortIdx < 3) {
httpPorts[httpPortIdx++] = httpAndIpcPort;
} else {
ipcPorts[ipcPortIdx++] = httpAndIpcPort;
}
}
LOG.info("Http ports selected: {}", httpPorts);
LOG.info("Ipc ports selected: {}", ipcPorts);
try (MiniDFSCluster miniDfsCluster = new MiniDFSCluster.Builder(conf)
.setDnHttpPorts(httpPorts)
.setDnIpcPorts(ipcPorts)
.numDataNodes(3).build()) {
miniDfsCluster.waitActive();
assertEquals(httpPorts[0],
miniDfsCluster.getDataNode(ipcPorts[0]).getInfoPort());
assertEquals(httpPorts[1],
miniDfsCluster.getDataNode(ipcPorts[1]).getInfoPort());
assertEquals(httpPorts[2],
miniDfsCluster.getDataNode(ipcPorts[2]).getInfoPort());
assertEquals(ipcPorts[0],
miniDfsCluster.getDataNode(ipcPorts[0]).getIpcPort());
assertEquals(ipcPorts[1],
miniDfsCluster.getDataNode(ipcPorts[1]).getIpcPort());
assertEquals(ipcPorts[2],
miniDfsCluster.getDataNode(ipcPorts[2]).getIpcPort());
}
}
}
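By contrast, when no ports are pinned the DataNodes still bind ephemeral ports, which a test can read back after startup; a sketch using existing MiniDFSCluster and DataNode accessors (assumes a LOG field as in the class above):

    // Illustrative only: discover the ports actually bound when none are pinned.
    try (MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build()) {
      cluster.waitActive();
      DataNode dn = cluster.getDataNodes().get(0);
      LOG.info("ipc port: {}, http port: {}", dn.getIpcPort(), dn.getInfoPort());
    }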

File: TestBalancerLongRunningTasks.java

@@ -262,7 +262,7 @@ public class TestBalancerLongRunningTasks {
long[][] storageCapacities = new long[][]{{ramDiskStorageLimit,
diskStorageLimit}};
cluster.startDataNodes(conf, replicationFactor, storageTypes, true, null,
- null, null, storageCapacities, null, false, false, false, null);
+ null, null, storageCapacities, null, false, false, false, null, null, null);
cluster.triggerHeartbeats();
Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);

File: TestMover.java

@@ -949,7 +949,7 @@ public class TestMover {
{StorageType.ARCHIVE, StorageType.ARCHIVE},
{StorageType.ARCHIVE, StorageType.ARCHIVE},
{StorageType.ARCHIVE, StorageType.ARCHIVE}},
- true, null, null, null,capacities, null, false, false, false, null);
+ true, null, null, null, capacities, null, false, false, false, null, null, null);
cluster.triggerHeartbeats();
// move file to ARCHIVE
@@ -982,7 +982,7 @@ public class TestMover {
{ StorageType.SSD, StorageType.DISK },
{ StorageType.SSD, StorageType.DISK },
{ StorageType.SSD, StorageType.DISK } },
- true, null, null, null, capacities, null, false, false, false, null);
+ true, null, null, null, capacities, null, false, false, false, null, null, null);
cluster.triggerHeartbeats();
// move file blocks to ONE_SSD policy
@@ -1372,7 +1372,7 @@ public class TestMover {
final MiniDFSCluster cluster) throws IOException {
cluster.startDataNodes(conf, newNodesRequired, newTypes, true, null, null,
- null, null, null, false, false, false, null);
+ null, null, null, false, false, false, null, null, null);
cluster.triggerHeartbeats();
}
}

File: TestStoragePolicySatisfierWithStripedFile.java

@@ -186,7 +186,7 @@ public class TestStoragePolicySatisfierWithStripedFile {
{StorageType.ARCHIVE, StorageType.ARCHIVE},
{StorageType.ARCHIVE, StorageType.ARCHIVE},
{StorageType.ARCHIVE, StorageType.ARCHIVE}},
- true, null, null, null, capacities, null, false, false, false, null);
+ true, null, null, null, capacities, null, false, false, false, null, null, null);
cluster.triggerHeartbeats();
// move file to ARCHIVE
@@ -294,7 +294,7 @@ public class TestStoragePolicySatisfierWithStripedFile {
new StorageType[][]{
{StorageType.ARCHIVE, StorageType.ARCHIVE},
{StorageType.ARCHIVE, StorageType.ARCHIVE}},
- true, null, null, null, capacities, null, false, false, false, null);
+ true, null, null, null, capacities, null, false, false, false, null, null, null);
cluster.triggerHeartbeats();
// Move file to ARCHIVE. Only 5 datanodes are available with ARCHIVE

File: TestExternalStoragePolicySatisfier.java

@@ -1712,7 +1712,7 @@ public class TestExternalStoragePolicySatisfier {
}
cluster.startDataNodes(conf, newNodesRequired, newTypes, true, null, null,
- null, capacities, null, false, false, false, null);
+ null, capacities, null, false, false, false, null, null, null);
cluster.triggerHeartbeats();
}

File: SimulatedDataNodes.java

@@ -131,7 +131,7 @@ public class SimulatedDataNodes extends Configured implements Tool {
+ " block listing files; launching DataNodes accordingly.");
mc.startDataNodes(getConf(), blockListFiles.size(), null, false,
StartupOption.REGULAR, null, null, null, null, false, true, true,
- null);
+ null, null, null);
long startTime = Time.monotonicNow();
System.out.println("Waiting for DataNodes to connect to NameNode and "
+ "init storage directories.");