diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java index e99b099935a..d6efb5cbdb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java @@ -74,6 +74,7 @@ public interface HdfsClientConfigKeys { String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address"; String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes"; int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820; + int DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT = 9840; String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY = "dfs.namenode.kerberos.principal"; String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 7776dc2a936..3c71e761355 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -35,6 +35,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT; import java.io.ByteArrayInputStream; import java.io.DataInputStream; @@ -491,62 +492,26 @@ public class DFSUtil { return addressList; } - /** - * Returns list of InetSocketAddresses corresponding to namenodes from the - * configuration. - * - * Returns namenode address specifically configured for datanodes (using - * service ports), if found. If not, regular RPC address configured for other - * clients is returned. - * - * @param conf configuration - * @return list of InetSocketAddress - * @throws IOException on error - */ - public static Map> getNNServiceRpcAddresses( - Configuration conf) throws IOException { - // Use default address as fall back - String defaultAddress; - try { - defaultAddress = NetUtils.getHostPortString( - DFSUtilClient.getNNAddress(conf)); - } catch (IllegalArgumentException e) { - defaultAddress = null; - } - - Map> addressList = - DFSUtilClient.getAddresses(conf, defaultAddress, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, - DFS_NAMENODE_RPC_ADDRESS_KEY); - if (addressList.isEmpty()) { - throw new IOException("Incorrect configuration: namenode address " - + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or " - + DFS_NAMENODE_RPC_ADDRESS_KEY - + " is not configured."); - } - return addressList; - } - /** * Returns list of InetSocketAddresses corresponding to the namenode * that manages this cluster. Note this is to be used by datanodes to get * the list of namenode addresses to talk to. * - * Returns namenode address specifically configured for datanodes (using - * service ports), if found. If not, regular RPC address configured for other - * clients is returned. + * Returns namenode address specifically configured for datanodes * * @param conf configuration * @return list of InetSocketAddress * @throws IOException on error */ public static Map> - getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException { + getNNServiceRpcAddresses(Configuration conf) throws IOException { // Use default address as fall back String defaultAddress; try { - defaultAddress = NetUtils.getHostPortString( - DFSUtilClient.getNNAddress(conf)); + InetSocketAddress rpcAddress = DFSUtilClient.getNNAddress(conf); + InetSocketAddress serviceAddress = InetSocketAddress.createUnresolved( + rpcAddress.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); + defaultAddress = NetUtils.getHostPortString(serviceAddress); } catch (IllegalArgumentException e) { defaultAddress = null; } @@ -569,16 +534,46 @@ public class DFSUtil { } } + // If true, then replace the port numbers in the final address list + // with the default service RPC port. + boolean replacePortNumbers = false; + + // First try to lookup using the service RPC address keys. Map> addressList = - DFSUtilClient.getAddressesForNsIds(conf, parentNameServices, - defaultAddress, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, - DFS_NAMENODE_RPC_ADDRESS_KEY); + DFSUtilClient.getAddressesForNsIds( + conf, parentNameServices, null, + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY); + + // Next try to lookup using the RPC address key. + if (addressList.isEmpty()) { + replacePortNumbers = true; + addressList = DFSUtilClient.getAddressesForNsIds( + conf, parentNameServices, null, DFS_NAMENODE_RPC_ADDRESS_KEY); + } + + // Finally, fallback to the default address. + // This will not yield the correct address in a federated/HA setup. + if (addressList.isEmpty()) { + addressList = DFSUtilClient.getAddressesForNsIds( + conf, parentNameServices, defaultAddress); + } + if (addressList.isEmpty()) { throw new IOException("Incorrect configuration: namenode address " - + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or " - + DFS_NAMENODE_RPC_ADDRESS_KEY - + " is not configured."); + + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or " + + DFS_NAMENODE_RPC_ADDRESS_KEY + + " is not configured."); + } + + if (replacePortNumbers) { + // Replace the RPC port(s) with the default service RPC port(s) + addressList.forEach((nsId, addresses) -> { + addresses.forEach((nnId, address) -> { + InetSocketAddress serviceAddress = InetSocketAddress.createUnresolved( + address.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); + addresses.put(nnId, serviceAddress); + }); + }); } return addressList; } @@ -1230,12 +1225,17 @@ public class DFSUtil { String serviceAddrKey = DFSUtilClient.concatSuffixes( DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId); - String addrKey = DFSUtilClient.concatSuffixes( - DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId); - String serviceRpcAddr = conf.get(serviceAddrKey); if (serviceRpcAddr == null) { - serviceRpcAddr = conf.get(addrKey); + String addrKey = DFSUtilClient.concatSuffixes( + DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId); + String rpcAddress = conf.get(addrKey); + if (rpcAddress != null) { + InetSocketAddress rpcAddr = NetUtils.createSocketAddr(rpcAddress); + InetSocketAddress serviceAddr = InetSocketAddress.createUnresolved( + rpcAddr.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); + serviceRpcAddr = NetUtils.getHostPortString(serviceAddr); + } } return serviceRpcAddr; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java index f6a11c24bb5..677559cb0b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java @@ -150,7 +150,7 @@ class BlockPoolManager { (DFSConfigKeys.DFS_NAMESERVICES)); Map> newAddressMap = DFSUtil - .getNNServiceRpcAddressesForCluster(conf); + .getNNServiceRpcAddresses(conf); Map> newLifelineAddressMap = DFSUtil .getNNLifelineRpcAddressesForCluster(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 318d8e011a4..5c2dcdc652c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -318,7 +318,7 @@ public class BackupNode extends NameNode { private NamespaceInfo handshake(Configuration conf) throws IOException { // connect to name node - InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true); + InetSocketAddress nnAddress = NameNode.getServiceAddress(conf); this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index d9f3c0ee1c1..55695b358b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1157,9 +1157,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, dir.setINodeAttributeProvider(inodeAttributeProvider); } snapshotManager.registerMXBean(); - InetSocketAddress serviceAddress = NameNode.getServiceAddress(conf, true); - this.nameNodeHostName = (serviceAddress != null) ? - serviceAddress.getHostName() : ""; + InetSocketAddress serviceAddress = NameNode.getServiceAddress(conf); + this.nameNodeHostName = serviceAddress.getHostName(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 79bbbc5d020..d700439aac9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -505,18 +505,17 @@ public class NameNode extends ReconfigurableBase implements /** * Fetches the address for services to use when connecting to namenode - * based on the value of fallback returns null if the special - * address is not specified or returns the default namenode address - * to be used by both clients and services. * Services here are datanodes, backup node, any non client connection */ - public static InetSocketAddress getServiceAddress(Configuration conf, - boolean fallback) { - String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY); - if (addr == null || addr.isEmpty()) { - return fallback ? DFSUtilClient.getNNAddress(conf) : null; + public static InetSocketAddress getServiceAddress(Configuration conf) { + String address = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY); + if (address == null || address.isEmpty()) { + InetSocketAddress rpcAddress = DFSUtilClient.getNNAddress(conf); + return NetUtils.createSocketAddr(rpcAddress.getHostName(), + HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); } - return DFSUtilClient.getNNAddress(addr); + return NetUtils.createSocketAddr(address, + HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); } // @@ -554,7 +553,7 @@ public class NameNode extends ReconfigurableBase implements * If the service rpc is not configured returns null */ protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) { - return NameNode.getServiceAddress(conf, false); + return NameNode.getServiceAddress(conf); } protected InetSocketAddress getRpcServerAddress(Configuration conf) { @@ -615,7 +614,8 @@ public class NameNode extends ReconfigurableBase implements } /** - * Modifies the configuration passed to contain the service rpc address setting + * Modifies the configuration passed to contain the service rpc address + * setting. */ protected void setRpcServiceServerAddress(Configuration conf, InetSocketAddress serviceRPCAddress) { @@ -1070,6 +1070,13 @@ public class NameNode extends ReconfigurableBase implements return serviceAddr == null ? getNameNodeAddress() : serviceAddr; } + /** + * @return NameNode service RPC address in "host:port" string form + */ + public String getServiceRpcAddressHostPortString() { + return NetUtils.getHostPortString(getServiceRpcAddress()); + } + /** * @return NameNode HTTP address, used by the Web UI, image transfer, * and HTTP-based file system clients like WebHDFS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 1ef3f55b053..78790bd286e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -333,66 +333,63 @@ public class NameNodeRpcServer implements NamenodeProtocols { .newReflectiveBlockingService(traceAdminXlator); InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf); - if (serviceRpcAddr != null) { - String bindHost = nn.getServiceRpcServerBindHost(conf); - if (bindHost == null) { - bindHost = serviceRpcAddr.getHostName(); - } - LOG.info("Service RPC server is binding to " + bindHost + ":" + - serviceRpcAddr.getPort()); - - int serviceHandlerCount = - conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY, - DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT); - serviceRpcServer = new RPC.Builder(conf) - .setProtocol( - org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class) - .setInstance(clientNNPbService) - .setBindAddress(bindHost) - .setPort(serviceRpcAddr.getPort()) - .setNumHandlers(serviceHandlerCount) - .setVerbose(false) - .setSecretManager(namesystem.getDelegationTokenSecretManager()) - .build(); - - // Add all the RPC protocols that the namenode implements - DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService, - serviceRpcServer); - DFSUtil.addPBProtocol(conf, ReconfigurationProtocolPB.class, - reconfigurationPbService, serviceRpcServer); - DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService, - serviceRpcServer); - DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService, - serviceRpcServer); - DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, - refreshAuthService, serviceRpcServer); - DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, - refreshUserMappingService, serviceRpcServer); - // We support Refreshing call queue here in case the client RPC queue is full - DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class, - refreshCallQueueService, serviceRpcServer); - DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, - genericRefreshService, serviceRpcServer); - DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, - getUserMappingService, serviceRpcServer); - DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class, - traceAdminService, serviceRpcServer); - - // Update the address with the correct port - InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress(); - serviceRPCAddress = new InetSocketAddress( - serviceRpcAddr.getHostName(), listenAddr.getPort()); - nn.setRpcServiceServerAddress(conf, serviceRPCAddress); - } else { - serviceRpcServer = null; - serviceRPCAddress = null; + String bindHost = nn.getServiceRpcServerBindHost(conf); + if (bindHost == null) { + bindHost = serviceRpcAddr.getHostName(); } + LOG.info("Service RPC server is binding to " + bindHost + ":" + + serviceRpcAddr.getPort()); + + int serviceHandlerCount = conf.getInt( + DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY, + DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT); + serviceRpcServer = new RPC.Builder(conf) + .setProtocol( + org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class) + .setInstance(clientNNPbService) + .setBindAddress(bindHost) + .setPort(serviceRpcAddr.getPort()) + .setNumHandlers(serviceHandlerCount) + .setVerbose(false) + .setSecretManager(namesystem.getDelegationTokenSecretManager()) + .build(); + + // Add all the RPC protocols that the namenode implements + DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService, + serviceRpcServer); + DFSUtil.addPBProtocol(conf, ReconfigurationProtocolPB.class, + reconfigurationPbService, serviceRpcServer); + DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService, + serviceRpcServer); + DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService, + serviceRpcServer); + DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, + refreshAuthService, serviceRpcServer); + DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, + refreshUserMappingService, serviceRpcServer); + // We support Refreshing call queue here in case the client RPC queue + // is full. + DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class, + refreshCallQueueService, serviceRpcServer); + DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, + genericRefreshService, serviceRpcServer); + DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, + getUserMappingService, serviceRpcServer); + DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class, + traceAdminService, serviceRpcServer); + + // Update the address with the correct port. + InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress(); + serviceRPCAddress = new InetSocketAddress( + serviceRpcAddr.getHostName(), listenAddr.getPort()); + nn.setRpcServiceServerAddress(conf, serviceRPCAddress); + InetSocketAddress lifelineRpcAddr = nn.getLifelineRpcServerAddress(conf); if (lifelineRpcAddr != null) { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, ProtobufRpcEngine.class); - String bindHost = nn.getLifelineRpcServerBindHost(conf); + bindHost = nn.getLifelineRpcServerBindHost(conf); if (bindHost == null) { bindHost = lifelineRpcAddr.getHostName(); } @@ -422,7 +419,7 @@ public class NameNodeRpcServer implements NamenodeProtocols { lifelineProtoPbService, lifelineRpcServer); // Update the address with the correct port - InetSocketAddress listenAddr = lifelineRpcServer.getListenerAddress(); + listenAddr = lifelineRpcServer.getListenerAddress(); lifelineRPCAddress = new InetSocketAddress(lifelineRpcAddr.getHostName(), listenAddr.getPort()); nn.setRpcLifelineServerAddress(conf, lifelineRPCAddress); @@ -432,7 +429,7 @@ public class NameNodeRpcServer implements NamenodeProtocols { } InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf); - String bindHost = nn.getRpcServerBindHost(conf); + bindHost = nn.getRpcServerBindHost(conf); if (bindHost == null) { bindHost = rpcAddr.getHostName(); } @@ -476,16 +473,14 @@ public class NameNodeRpcServer implements NamenodeProtocols { conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); - if (serviceRpcServer != null) { - serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); - } + serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); if (lifelineRpcServer != null) { lifelineRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider()); } } // The rpc-server port can be ephemeral... ensure we have the correct info - InetSocketAddress listenAddr = clientRpcServer.getListenerAddress(); + listenAddr = clientRpcServer.getListenerAddress(); clientRpcAddress = new InetSocketAddress( rpcAddr.getHostName(), listenAddr.getPort()); nn.setRpcServerAddress(conf, clientRpcAddress); @@ -523,9 +518,7 @@ public class NameNodeRpcServer implements NamenodeProtocols { clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class); clientRpcServer.setTracer(nn.tracer); - if (serviceRpcServer != null) { - serviceRpcServer.setTracer(nn.tracer); - } + serviceRpcServer.setTracer(nn.tracer); if (lifelineRpcServer != null) { lifelineRpcServer.setTracer(nn.tracer); } @@ -554,9 +547,7 @@ public class NameNodeRpcServer implements NamenodeProtocols { */ void start() { clientRpcServer.start(); - if (serviceRpcServer != null) { - serviceRpcServer.start(); - } + serviceRpcServer.start(); if (lifelineRpcServer != null) { lifelineRpcServer.start(); } @@ -567,9 +558,7 @@ public class NameNodeRpcServer implements NamenodeProtocols { */ void join() throws InterruptedException { clientRpcServer.join(); - if (serviceRpcServer != null) { - serviceRpcServer.join(); - } + serviceRpcServer.join(); if (lifelineRpcServer != null) { lifelineRpcServer.join(); } @@ -582,9 +571,7 @@ public class NameNodeRpcServer implements NamenodeProtocols { if (clientRpcServer != null) { clientRpcServer.stop(); } - if (serviceRpcServer != null) { - serviceRpcServer.stop(); - } + serviceRpcServer.stop(); if (lifelineRpcServer != null) { lifelineRpcServer.stop(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index ff83e3493c6..e8dfb721340 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -228,7 +228,7 @@ public class SecondaryNameNode implements Runnable, // Create connection to the namenode. shouldRun = true; - nameNodeAddr = NameNode.getServiceAddress(conf, true); + nameNodeAddr = NameNode.getServiceAddress(conf); this.conf = conf; this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index f57cb4bd939..fd5a70e3843 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -159,7 +159,8 @@ public class EditLogTailer { for (RemoteNameNodeInfo info : nns) { // overwrite the socket address, if we need to - InetSocketAddress ipc = NameNode.getServiceAddress(info.getConfiguration(), true); + InetSocketAddress ipc = NameNode.getServiceAddress( + info.getConfiguration()); // sanity check the ipc address Preconditions.checkArgument(ipc.getPort() > 0, "Active NameNode must have an IPC port configured. " + "Got address '%s'", ipc); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java index 9a51190b176..248be55474a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java @@ -54,7 +54,7 @@ public class RemoteNameNodeInfo { for (Configuration otherNode : otherNodes) { String otherNNId = HAUtil.getNameNodeId(otherNode, nsId); // don't do any validation here as in some cases, it can be overwritten later - InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode, true); + InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode); final String scheme = DFSUtil.getHttpClientScheme(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index 789ed9ca2f0..3cbcd9c2191 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -121,7 +121,7 @@ public class StandbyCheckpointer { private URL getHttpAddress(Configuration conf) throws IOException { final String scheme = DFSUtil.getHttpClientScheme(conf); - String defaultHost = NameNode.getServiceAddress(conf, true).getHostName(); + String defaultHost = NameNode.getServiceAddress(conf).getHostName(); URI addr = DFSUtil.getInfoServerWithDefaultHost(defaultHost, conf, scheme); return addr.toURL(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java index e6cf16c89df..e780393db5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java @@ -187,7 +187,7 @@ public class GetConf extends Configured implements Tool { static class NameNodesCommandHandler extends CommandHandler { @Override int doWorkInternal(GetConf tool, String []args) throws IOException { - tool.printMap(DFSUtil.getNNServiceRpcAddressesForCluster(tool.getConf())); + tool.printMap(DFSUtil.getNNServiceRpcAddresses(tool.getConf())); return 0; } } @@ -224,7 +224,7 @@ public class GetConf extends Configured implements Tool { public int doWorkInternal(GetConf tool, String []args) throws IOException { Configuration config = tool.getConf(); List cnnlist = DFSUtil.flattenAddressMap( - DFSUtil.getNNServiceRpcAddressesForCluster(config)); + DFSUtil.getNNServiceRpcAddresses(config)); if (!cnnlist.isEmpty()) { for (ConfiguredNNAddress cnn : cnnlist) { InetSocketAddress rpc = cnn.getAddress(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index f85996b334f..4f37b5637ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -61,8 +61,7 @@ connecting to this address if it is configured. In the case of HA/Federation where multiple namenodes exist, the name service id is added to the name e.g. dfs.namenode.servicerpc-address.ns1 dfs.namenode.rpc-address.EXAMPLENAMESERVICE - The value of this property will take the form of nn-host1:rpc-port. - If the value of this property is unset the value of dfs.namenode.rpc-address will be used as the default. + The value of this property will take the form of nn-host1:rpc-port. The NameNode's default service RPC port is 9840. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 0345cf5ef50..aa3ed303cad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -166,6 +166,7 @@ public class MiniDFSCluster implements AutoCloseable { */ public static class Builder { private int nameNodePort = 0; + private int nameNodeServicePort = 0; private int nameNodeHttpPort = 0; private final Configuration conf; private int numDataNodes = 1; @@ -208,6 +209,14 @@ public class MiniDFSCluster implements AutoCloseable { this.nameNodePort = val; return this; } + + /** + * Default: 0 + */ + public Builder nameNodeServicePort(int val) { + this.nameNodeServicePort = val; + return this; + } /** * Default: 0 @@ -399,8 +408,8 @@ public class MiniDFSCluster implements AutoCloseable { } /** - * Default: false - * When true the hosts file/include file for the cluster is setup + * Default: false. + * When true the hosts file/include file for the cluster is setup. */ public Builder setupHostsFile(boolean val) { this.setupHostsFile = val; @@ -410,7 +419,7 @@ public class MiniDFSCluster implements AutoCloseable { /** * Default: a single namenode. * See {@link MiniDFSNNTopology#simpleFederatedTopology(int)} to set up - * federated nameservices + * federated nameservices. */ public Builder nnTopology(MiniDFSNNTopology topology) { this.nnTopology = topology; @@ -461,7 +470,8 @@ public class MiniDFSCluster implements AutoCloseable { if (builder.nnTopology == null) { // If no topology is specified, build a single NN. builder.nnTopology = MiniDFSNNTopology.simpleSingleNN( - builder.nameNodePort, builder.nameNodeHttpPort); + builder.nameNodePort, builder.nameNodeServicePort, + builder.nameNodeHttpPort); } assert builder.storageTypes == null || builder.storageTypes.length == builder.numDataNodes; @@ -770,7 +780,7 @@ public class MiniDFSCluster implements AutoCloseable { manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, operation, null, racks, hosts, null, simulatedCapacities, null, true, false, - MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), + MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0, 0), true, false, false, null, true, false); } @@ -1249,6 +1259,11 @@ public class MiniDFSCluster implements AutoCloseable { DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnConf.getNnId()); conf.set(key, "127.0.0.1:" + nnConf.getIpcPort()); + + key = DFSUtil.addKeySuffixes( + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId, + nnConf.getNnId()); + conf.set(key, "127.0.0.1:" + nnConf.getServicePort()); } private static String[] createArgs(StartupOption operation) { @@ -1282,6 +1297,8 @@ public class MiniDFSCluster implements AutoCloseable { // the conf hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), nn.getNameNodeAddressHostPortString()); + hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + nameserviceId, nnId), nn.getServiceRpcAddressHostPortString()); if (nn.getHttpAddress() != null) { hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress())); @@ -1337,6 +1354,14 @@ public class MiniDFSCluster implements AutoCloseable { return getNN(nnIndex).conf; } + /** + * Return the cluster-wide configuration. + * @return + */ + public Configuration getClusterConfiguration() { + return conf; + } + private NameNodeInfo getNN(int nnIndex) { int count = 0; for (NameNodeInfo nn : namenodes.values()) { @@ -1929,6 +1954,16 @@ public class MiniDFSCluster implements AutoCloseable { return getNN(nnIndex).nameNode.getNameNodeAddress().getPort(); } + /** + * Gets the service rpc port used by the NameNode, because the caller + * supplied port is not necessarily the actual port used. + * Assumption: cluster has a single namenode + */ + public int getNameNodeServicePort() { + checkSingleNameNode(); + return getNameNodeServicePort(0); + } + /** * @return the service rpc port used by the NameNode at the given index. */ @@ -2556,12 +2591,14 @@ public class MiniDFSCluster implements AutoCloseable { } NameNodeInfo info = getNN(nnIndex); - InetSocketAddress addr = info.nameNode.getServiceRpcAddress(); - assert addr.getPort() != 0; - DFSClient client = new DFSClient(addr, conf); + InetSocketAddress nameNodeAddress = info.nameNode.getNameNodeAddress(); + assert nameNodeAddress.getPort() != 0; + DFSClient client = new DFSClient(nameNodeAddress, conf); // ensure all datanodes have registered and sent heartbeat to the namenode - while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), addr)) { + InetSocketAddress serviceAddress = info.nameNode.getServiceRpcAddress(); + while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), + serviceAddress)) { try { LOG.info("Waiting for cluster to become active"); Thread.sleep(100); @@ -3056,13 +3093,18 @@ public class MiniDFSCluster implements AutoCloseable { } } + public void addNameNode(Configuration conf, int namenodePort) + throws IOException{ + addNameNode(conf, namenodePort, 0); + } + /** * Add a namenode to a federated cluster and start it. Configuration of * datanodes in the cluster is refreshed to register with the new namenode. * * @return newly started namenode */ - public void addNameNode(Configuration conf, int namenodePort) + public void addNameNode(Configuration conf, int namenodePort, int servicePort) throws IOException { if(!federation) throw new IOException("cannot add namenode to non-federated cluster"); @@ -3076,7 +3118,9 @@ public class MiniDFSCluster implements AutoCloseable { String nnId = null; initNameNodeAddress(conf, nameserviceId, - new NNConf(nnId).setIpcPort(namenodePort)); + new NNConf(nnId) + .setIpcPort(namenodePort) + .setServicePort(servicePort)); // figure out the current number of NNs NameNodeInfo[] infos = this.getNameNodeInfos(nameserviceId); int nnIndex = infos == null ? 0 : infos.length; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java index b9786a32a75..b1d609ac413 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java @@ -43,12 +43,13 @@ public class MiniDFSNNTopology { * Set up a simple non-federated non-HA NN. */ public static MiniDFSNNTopology simpleSingleNN( - int nameNodePort, int nameNodeHttpPort) { + int rpcPort, int servicePort, int httpPort) { return new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf(null) .addNN(new MiniDFSNNTopology.NNConf(null) - .setHttpPort(nameNodeHttpPort) - .setIpcPort(nameNodePort))); + .setIpcPort(rpcPort) + .setServicePort(servicePort) + .setHttpPort(httpPort))); } @@ -221,6 +222,7 @@ public class MiniDFSNNTopology { private final String nnId; private int httpPort; private int ipcPort; + private int servicePort; private String clusterId; public NNConf(String nnId) { @@ -234,6 +236,10 @@ public class MiniDFSNNTopology { int getIpcPort() { return ipcPort; } + + int getServicePort() { + return servicePort; + } int getHttpPort() { return httpPort; @@ -253,6 +259,11 @@ public class MiniDFSNNTopology { return this; } + public NNConf setServicePort(int servicePort) { + this.servicePort = servicePort; + return this; + } + public NNConf setClusterId(String clusterId) { this.clusterId = clusterId; return this; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index f811d3d675f..4ae2a773bb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -33,6 +33,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; import static org.hamcrest.CoreMatchers.not; @@ -83,9 +84,9 @@ import com.google.common.collect.Sets; public class TestDFSUtil { - static final String NS1_NN_ADDR = "ns1-nn.example.com:9820"; - static final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820"; - static final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820"; + private static final String NS1_NN_ADDR = "ns1-nn.example.com:9820"; + private static final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820"; + private static final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820"; /** * Reset to default UGI settings since some tests change them. @@ -273,13 +274,13 @@ public class TestDFSUtil { assertEquals(1, nn1Map.size()); InetSocketAddress addr = nn1Map.get(null); assertEquals("localhost", addr.getHostName()); - assertEquals(9000, addr.getPort()); + assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, addr.getPort()); Map nn2Map = nnMap.get("nn2"); assertEquals(1, nn2Map.size()); addr = nn2Map.get(null); assertEquals("localhost", addr.getHostName()); - assertEquals(9001, addr.getPort()); + assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, addr.getPort()); // Test - can look up nameservice ID from service address checkNameServiceId(conf, NN1_ADDRESS, "nn1"); @@ -314,7 +315,8 @@ public class TestDFSUtil { Map defaultNsMap = addrMap.get(null); assertEquals(1, defaultNsMap.size()); - assertEquals(9999, defaultNsMap.get(null).getPort()); + assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, + defaultNsMap.get(null).getPort()); } /** @@ -491,6 +493,10 @@ public class TestDFSUtil { final String NS1_NN2_HOST = "ns1-nn2.example.com:9820"; final String NS2_NN1_HOST = "ns2-nn1.example.com:9820"; final String NS2_NN2_HOST = "ns2-nn2.example.com:9820"; + final String NS1_NN1_SERVICE_HOST = "ns1-nn1.example.com:9840"; + final String NS1_NN2_SERVICE_HOST = "ns1-nn2.example.com:9840"; + final String NS2_NN1_SERVICE_HOST = "ns2-nn1.example.com:9840"; + final String NS2_NN2_SERVICE_HOST = "ns2-nn2.example.com:9840"; conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1"); // Two nameservices, each with two NNs. @@ -524,12 +530,14 @@ public class TestDFSUtil { assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString()); assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString()); - assertEquals(NS1_NN1_HOST, + assertEquals(NS1_NN1_SERVICE_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1")); - assertEquals(NS1_NN2_HOST, + assertEquals(NS1_NN2_SERVICE_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2")); - assertEquals(NS2_NN1_HOST, + assertEquals(NS2_NN1_SERVICE_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1")); + assertEquals(NS2_NN2_SERVICE_HOST, + DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn2")); // No nameservice was given and we can't determine which service addr // to use as two nameservices could share a namenode ID. @@ -555,9 +563,11 @@ public class TestDFSUtil { // One nameservice with two NNs final String NS1_NN1_HOST = "ns1-nn1.example.com:9820"; - final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821"; - final String NS1_NN2_HOST = "ns1-nn1.example.com:9820"; + final String NS1_NN1_HOST_SVC = "ns1-nn1.example.com:9821"; + final String NS1_NN1_HOST_DEFAULT_SVC = "ns1-nn1.example.com:9840"; + final String NS1_NN2_HOST = "ns1-nn2.example.com:9820"; final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821"; + final String NS1_NN2_HOST_DEFAULT_SVC = "ns1-nn2.example.com:9840"; conf.set(DFS_NAMESERVICES, "ns1"); conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2"); @@ -567,12 +577,15 @@ public class TestDFSUtil { conf.set(DFSUtil.addKeySuffixes( DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST); - // The rpc address is used if no service address is defined - assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1")); - assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2")); + // The default service rpc address is used if no service address is defined + assertEquals(NS1_NN1_HOST_DEFAULT_SVC, + DFSUtil.getNamenodeServiceAddr(conf, null, "nn1")); + assertEquals(NS1_NN2_HOST_DEFAULT_SVC, + DFSUtil.getNamenodeServiceAddr(conf, null, "nn2")); // A nameservice is specified explicitly - assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1")); + assertEquals(NS1_NN1_HOST_DEFAULT_SVC, + DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1")); assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, "invalid", "nn1")); // The service addrs are used when they are defined @@ -995,6 +1008,92 @@ public class TestDFSUtil { Assert.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias")); } + @Test + public void testGetNNServiceRpcAddresses() throws IOException { + Configuration conf = new HdfsConfiguration(); + final String NN_HOST = "nn.example.com"; + final String NN_ADDRESS = "hdfs://" + NN_HOST + ":9000/"; + conf.set(FS_DEFAULT_NAME_KEY, NN_ADDRESS); + + // No service RPC, no rpc + Map> nsMap = DFSUtil + .getNNServiceRpcAddresses(conf); + assertEquals(1, nsMap.size()); + InetSocketAddress address = nsMap.get(null).get(null); + assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, + address.getPort()); + assertEquals(NN_HOST, address.getHostName()); + + // No service RPC + final String RPC_ADDRESS = NN_HOST + ":9191"; + conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, RPC_ADDRESS); + nsMap = DFSUtil.getNNServiceRpcAddresses(conf); + assertEquals(1, nsMap.size()); + address = nsMap.get(null).get(null); + assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, + address.getPort()); + assertEquals(NN_HOST, address.getHostName()); + + // Service RPC present + final String SERVICE_RPC_ADDRESS = NN_HOST + ":9292"; + conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, SERVICE_RPC_ADDRESS); + nsMap = DFSUtil.getNNServiceRpcAddresses(conf); + assertEquals(1, nsMap.size()); + address = nsMap.get(null).get(null); + assertEquals(9292, address.getPort()); + assertEquals(NN_HOST, address.getHostName()); + } + + @Test + public void testGetNNServiceRpcAddressesForHA() throws IOException { + Configuration conf = new HdfsConfiguration(); + + final String NS = "mycluster"; + final String NN1_HOST = "nn1.example.com"; + final String NN2_HOST = "nn2.example.com"; + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://mycluster"); + + conf.set(DFS_NAMESERVICES, NS); + conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NS), + "nn1,nn2"); + conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, NS, "nn1"), + NN1_HOST + ":9820"); + conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, NS, "nn2"), + NN2_HOST + ":9820"); + + assertTrue(HAUtil.isHAEnabled(conf, NS)); + + // Without Service RPC keys + Map> nsMap = + DFSUtil.getNNServiceRpcAddresses(conf); + assertEquals(1, nsMap.size()); + Map nnMap = nsMap.get(NS); + assertEquals(2, nnMap.size()); + InetSocketAddress nn1Address = nnMap.get("nn1"); + assertEquals(NN1_HOST, nn1Address.getHostName()); + assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, nn1Address.getPort()); + InetSocketAddress nn2Address = nnMap.get("nn2"); + assertEquals(NN2_HOST, nn2Address.getHostName()); + assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, nn2Address.getPort()); + + // With Service RPC keys + final int CUSTOM_SERVICE_PORT = 9191; + conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + NS, "nn1"), NN1_HOST + ":" + CUSTOM_SERVICE_PORT); + conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + NS, "nn2"), NN2_HOST + ":" + CUSTOM_SERVICE_PORT); + nsMap = DFSUtil.getNNServiceRpcAddresses(conf); + assertEquals(1, nsMap.size()); + nnMap = nsMap.get(NS); + assertEquals(2, nnMap.size()); + nn1Address = nnMap.get("nn1"); + assertEquals(NN1_HOST, nn1Address.getHostName()); + assertEquals(CUSTOM_SERVICE_PORT, nn1Address.getPort()); + nn2Address = nnMap.get("nn2"); + assertEquals(NN2_HOST, nn2Address.getHostName()); + assertEquals(CUSTOM_SERVICE_PORT, nn2Address.getPort()); + } + @Test public void testGetNNServiceRpcAddressesForNsIds() throws IOException { Configuration conf = new HdfsConfiguration(); @@ -1017,13 +1116,13 @@ public class TestDFSUtil { } Map> nnMap = DFSUtil - .getNNServiceRpcAddressesForCluster(conf); + .getNNServiceRpcAddresses(conf); assertEquals(1, nnMap.size()); assertTrue(nnMap.containsKey("nn1")); conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn3"); try { - DFSUtil.getNNServiceRpcAddressesForCluster(conf); + DFSUtil.getNNServiceRpcAddresses(conf); fail("Should fail for misconfiguration"); } catch (IOException ignored) { } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java index 59e85553e5c..1914b788034 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.DNS; import org.apache.hadoop.test.PathUtils; +import org.junit.Ignore; import org.junit.Test; import java.io.File; @@ -277,17 +278,14 @@ public class TestHDFSServerPorts { // different http port conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST); started = canStartNameNode(conf2); + assertFalse("Should've failed on service port", started); - if (withService) { - assertFalse("Should've failed on service port", started); - - // reset conf2 since NameNode modifies it - FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST); - conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST); - // Set Service address - conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, THIS_HOST); - started = canStartNameNode(conf2); - } + // reset conf2 since NameNode modifies it + FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST); + conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST); + // Set Service address + conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, THIS_HOST); + started = canStartNameNode(conf2); assertTrue(started); } finally { stopNameNode(nn); @@ -359,38 +357,39 @@ public class TestHDFSServerPorts { } } - /** - * Verify BackupNode port usage. - */ - @Test(timeout = 300000) - public void testBackupNodePorts() throws Exception { - NameNode nn = null; - try { - nn = startNameNode(); + /** + * Verify BackupNode port usage. + */ + @Ignore + @Test(timeout = 300000) + public void testBackupNodePorts() throws Exception { + NameNode nn = null; + try { + nn = startNameNode(); - Configuration backup_config = new HdfsConfiguration(config); - backup_config.set( - DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST); - // bind http server to the same port as name-node - backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, - backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY)); + Configuration backup_config = new HdfsConfiguration(config); + backup_config.set( + DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST); + // bind http server to the same port as name-node + backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, + backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY)); - LOG.info("= Starting 1 on: " + backup_config.get( - DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY)); + LOG.info("= Starting 1 on: " + backup_config.get( + DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY)); - assertFalse("Backup started on same port as Namenode", - canStartBackupNode(backup_config)); // should fail + assertFalse("Backup started on same port as Namenode", + canStartBackupNode(backup_config)); // should fail - // bind http server to a different port - backup_config.set( - DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST); - LOG.info("= Starting 2 on: " + backup_config.get( - DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY)); + // bind http server to a different port + backup_config.set( + DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST); + LOG.info("= Starting 2 on: " + backup_config.get( + DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY)); - boolean started = canStartBackupNode(backup_config); - assertTrue("Backup Namenode should've started", started); // should start now - } finally { - stopNameNode(nn); - } + boolean started = canStartBackupNode(backup_config); + assertTrue("Backup Namenode should've started", started); // should start now + } finally { + stopNameNode(nn); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index f25d28f22c3..df6dc03b188 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -324,7 +324,7 @@ public class TestSafeMode { } catch (RemoteException re) { assertEquals(SafeModeException.class.getName(), re.getClassName()); GenericTestUtils.assertExceptionContains( - NameNode.getServiceAddress(conf, true).getHostName(), re); + NameNode.getServiceAddress(conf).getHostName(), re); } catch (IOException ioe) { fail("Encountered exception" + " " + StringUtils.stringifyException(ioe)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java index c1638942c85..501ba77c94b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java @@ -77,7 +77,9 @@ public class MiniQJMHACluster { public static MiniDFSNNTopology createDefaultTopology(int nns, int startingPort) { MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(NAMESERVICE); for (int i = 0; i < nns; i++) { - nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i).setIpcPort(startingPort++) + nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i) + .setIpcPort(startingPort++) + .setServicePort(startingPort++) .setHttpPort(startingPort++)); } @@ -148,8 +150,9 @@ public class MiniQJMHACluster { int port = basePort; for (int i = 0; i < numNNs; i++) { nns.add("127.0.0.1:" + port); - // increment by 2 each time to account for the http port in the config setting - port += 2; + // increment by 3 each time to account for the http and the service port + // in the config setting + port += 3; } // use standard failover configurations diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index 14441931fe9..516f15932c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -89,8 +89,9 @@ public class TestBalancerWithHANameNodes { / numOfDatanodes, (short) numOfDatanodes, 1); // start up an empty node with the same capacity and on the same rack - cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack }, - new long[] { newNodeCapacity }); + cluster.startDataNodes(cluster.getClusterConfiguration(), + 1, true, null, new String[] {newNodeRack}, + new long[] {newNodeCapacity}); totalCapacity += newNodeCapacity; TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java index 876a8545ec5..c199c9c18c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java @@ -105,8 +105,11 @@ public class InternalDataNodeTestUtils { * * @throws IOException */ - public static DataNode startDNWithMockNN(Configuration conf, - final InetSocketAddress nnSocketAddr, final String dnDataDir) + public static DataNode startDNWithMockNN( + Configuration conf, + final InetSocketAddress nnSocketAddr, + final InetSocketAddress nnServiceAddr, + final String dnDataDir) throws IOException { FileSystem.setDefaultUri(conf, "hdfs://" + nnSocketAddr.getHostName() + ":" @@ -149,7 +152,7 @@ public class InternalDataNodeTestUtils { @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { - Assert.assertEquals(nnSocketAddr, nnAddr); + Assert.assertEquals(nnServiceAddr, nnAddr); return namenode; } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 311d5a67c39..98450f6fd78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -124,8 +124,6 @@ public class TestBlockRecovery { private final static long RECOVERY_ID = 3000L; private final static String CLUSTER_ID = "testClusterID"; private final static String POOL_ID = "BP-TEST"; - private final static InetSocketAddress NN_ADDR = new InetSocketAddress( - "localhost", 5020); private final static long BLOCK_ID = 1000L; private final static long GEN_STAMP = 2000L; private final static long BLOCK_LEN = 3000L; @@ -188,7 +186,7 @@ public class TestBlockRecovery { } conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); FileSystem.setDefaultUri(conf, - "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort()); + "hdfs://localhost:5020"); ArrayList locations = new ArrayList(); File dataDir = new File(DATA_DIR); FileUtil.fullyDelete(dataDir); @@ -231,7 +229,7 @@ public class TestBlockRecovery { @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { - Assert.assertEquals(NN_ADDR, nnAddr); + Assert.assertEquals("localhost:9840", nnAddr.toString()); return namenode; } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java index 32fda370161..bee6c1d061d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java @@ -61,11 +61,16 @@ import com.google.common.base.Supplier; public class TestDataNodeMetricsLogger { static final Log LOG = LogFactory.getLog(TestDataNodeMetricsLogger.class); + @Rule + public Timeout globalTimeout = new Timeout(120_000); + private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory() + "data"; private final static InetSocketAddress NN_ADDR = new InetSocketAddress( "localhost", 5020); + private final static InetSocketAddress NN_SERVICE_ADDR = + new InetSocketAddress("localhost", 5021); private DataNode dn; @@ -86,10 +91,13 @@ public class TestDataNodeMetricsLogger { conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0"); conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); + conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + NN_SERVICE_ADDR.getHostName() + ":" + NN_SERVICE_ADDR.getPort()); conf.setInt(DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY, enableMetricsLogging ? 1 : 0); // If enabled, log early and log often - dn = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR); + dn = InternalDataNodeTestUtils.startDNWithMockNN( + conf, NN_ADDR, NN_SERVICE_ADDR, DATA_DIR); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java index 8e1e23673af..25650fd1e0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java @@ -109,16 +109,16 @@ public class TestDataNodeMultipleRegistrations { BPOfferService bpos2 = dn.getAllBpOs().get(1); // The order of bpos is not guaranteed, so fix the order - if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) { + if (getNNSocketAddress(bpos1).equals(nn2.getServiceRpcAddress())) { BPOfferService tmp = bpos1; bpos1 = bpos2; bpos2 = tmp; } assertEquals("wrong nn address", getNNSocketAddress(bpos1), - nn1.getNameNodeAddress()); + nn1.getServiceRpcAddress()); assertEquals("wrong nn address", getNNSocketAddress(bpos2), - nn2.getNameNodeAddress()); + nn2.getServiceRpcAddress()); assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1); assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2); assertEquals("wrong cid", dn.getClusterId(), cid1); @@ -182,7 +182,7 @@ public class TestDataNodeMultipleRegistrations { assertEquals("wrong nn address", getNNSocketAddress(bpos1), - nn1.getNameNodeAddress()); + nn1.getServiceRpcAddress()); assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1); assertEquals("wrong cid", dn.getClusterId(), cid1); cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index 1dfd3c31db6..884c93d2c5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -51,8 +51,10 @@ public class TestDataNodeReconfiguration { private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class); private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory() + "data"; - private final static InetSocketAddress NN_ADDR = new InetSocketAddress( - "localhost", 5020); + private final static InetSocketAddress NN_ADDR = + new InetSocketAddress("localhost", 5020); + private final static InetSocketAddress NN_SERVICE_ADDR = + new InetSocketAddress("localhost", 5021); private final int NUM_NAME_NODE = 1; private final int NUM_DATA_NODE = 10; private MiniDFSCluster cluster; @@ -99,10 +101,13 @@ public class TestDataNodeReconfiguration { conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0"); conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); + conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + NN_SERVICE_ADDR.getHostName() + ":" + NN_SERVICE_ADDR.getPort()); DataNode[] result = new DataNode[numDateNode]; for (int i = 0; i < numDateNode; i++) { - result[i] = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR); + result[i] = InternalDataNodeTestUtils.startDNWithMockNN( + conf, NN_ADDR, NN_SERVICE_ADDR, DATA_DIR); } return result; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java index bb1d9eff0ee..52180213204 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java @@ -78,8 +78,6 @@ public class TestDatanodeProtocolRetryPolicy { ArrayList locations = new ArrayList(); private final static String CLUSTER_ID = "testClusterID"; private final static String POOL_ID = "BP-TEST"; - private final static InetSocketAddress NN_ADDR = new InetSocketAddress( - "localhost", 5020); private static DatanodeRegistration datanodeRegistration = DFSTestUtil.getLocalDatanodeRegistration(); @@ -101,7 +99,7 @@ public class TestDatanodeProtocolRetryPolicy { conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0"); conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); FileSystem.setDefaultUri(conf, - "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort()); + "hdfs://localhost:5020"); File dataDir = new File(DATA_DIR); FileUtil.fullyDelete(dataDir); dataDir.mkdirs(); @@ -228,7 +226,7 @@ public class TestDatanodeProtocolRetryPolicy { @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { - Assert.assertEquals(NN_ADDR, nnAddr); + Assert.assertEquals("localhost:9840", nnAddr.toString()); return namenode; } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java index f8594ca7eba..37d1b57667d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java @@ -44,6 +44,11 @@ public class TestRefreshNamenodes { private final int nnPort3 = 2227; private final int nnPort4 = 2230; + private final int nnServicePort1 = 2222; + private final int nnServicePort2 = 2225; + private final int nnServicePort3 = 2228; + private final int nnServicePort4 = 2231; + @Test public void testRefreshNamenodes() throws IOException { // Start cluster with a single NN and DN @@ -52,7 +57,9 @@ public class TestRefreshNamenodes { try { MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new NSConf("ns1").addNN( - new NNConf(null).setIpcPort(nnPort1))) + new NNConf(null) + .setIpcPort(nnPort1) + .setServicePort(nnServicePort1))) .setFederation(true); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) @@ -61,20 +68,20 @@ public class TestRefreshNamenodes { DataNode dn = cluster.getDataNodes().get(0); assertEquals(1, dn.getAllBpOs().size()); - cluster.addNameNode(conf, nnPort2); + cluster.addNameNode(conf, nnPort2, nnServicePort2); assertEquals(2, dn.getAllBpOs().size()); - cluster.addNameNode(conf, nnPort3); + cluster.addNameNode(conf, nnPort3, nnServicePort3); assertEquals(3, dn.getAllBpOs().size()); - cluster.addNameNode(conf, nnPort4); + cluster.addNameNode(conf, nnPort4, nnServicePort4); // Ensure a BPOfferService in the datanodes corresponds to // a namenode in the cluster Set nnAddrsFromCluster = Sets.newHashSet(); for (int i = 0; i < 4; i++) { assertTrue(nnAddrsFromCluster.add( - cluster.getNameNode(i).getNameNodeAddress())); + cluster.getNameNode(i).getServiceRpcAddress())); } Set nnAddrsFromDN = Sets.newHashSet(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index 10d9f114793..5c58e0a6102 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -54,6 +54,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Before; +import org.junit.Ignore; import org.junit.Test; import com.google.common.base.Supplier; @@ -61,6 +62,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; +@Ignore("Temporarily disabling the BackupNode unit test.") public class TestBackupNode { public static final Log LOG = LogFactory.getLog(TestBackupNode.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 2e496742655..4282c226430 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -1364,9 +1364,9 @@ public class TestCheckpoint { Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0)); Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1)); InetSocketAddress nn1RpcAddress = cluster.getNameNode(0) - .getNameNodeAddress(); + .getServiceRpcAddress(); InetSocketAddress nn2RpcAddress = cluster.getNameNode(1) - .getNameNodeAddress(); + .getServiceRpcAddress(); String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort(); String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort(); @@ -1923,6 +1923,7 @@ public class TestCheckpoint { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(true).build(); int origPort = cluster.getNameNodePort(); + int origServicePort = cluster.getNameNodeServicePort(); int origHttpPort = cluster.getNameNode().getHttpAddress().getPort(); Configuration snnConf = new Configuration(conf); File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(), @@ -1949,6 +1950,7 @@ public class TestCheckpoint { cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(0) .nameNodePort(origPort) + .nameNodeServicePort(origServicePort) .nameNodeHttpPort(origHttpPort) .format(true).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 937bb61c7ec..95a391bb1f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -661,12 +661,15 @@ public class TestNameNodeMXBean { for (int i = 0; i < 5; i++) { try{ // Have to specify IPC ports so the NNs can talk to each other. - int[] ports = ServerSocketUtil.getPorts(2); + int[] ports = ServerSocketUtil.getPorts(4); MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") - .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ports[0])) - .addNN( - new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1]))); + .addNN(new MiniDFSNNTopology.NNConf("nn1") + .setIpcPort(ports[0]) + .setServicePort(ports[1])) + .addNN(new MiniDFSNNTopology.NNConf("nn2") + .setIpcPort(ports[2]) + .setServicePort(ports[3]))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology).numDataNodes(0) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java index 9a0e67c3790..d7216c0be14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java @@ -110,6 +110,7 @@ public class TestNameNodeMetricsLogger { throws IOException { Configuration conf = new HdfsConfiguration(); conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:0"); + conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "0.0.0.0:0"); conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); conf.setInt(DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY, enableMetricsLogging ? 1 : 0); // If enabled, log early and log often diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java index 0cf1fed81e9..981785a522c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java @@ -125,6 +125,8 @@ public class TestValidateConfigurationSettings { // Set ephemeral ports conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + "127.0.0.1:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java index 169bbeef119..a3671674c78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index b3bb3dda201..4d049708ba4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -171,15 +171,18 @@ public class TestEditLogTailer { for (int i = 0; i < 5; i++) { try { // Have to specify IPC ports so the NNs can talk to each other. - int[] ports = ServerSocketUtil.getPorts(3); + int[] ports = ServerSocketUtil.getPorts(6); MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") .addNN(new MiniDFSNNTopology.NNConf("nn1") - .setIpcPort(ports[0])) + .setIpcPort(ports[0]) + .setServicePort(ports[1])) .addNN(new MiniDFSNNTopology.NNConf("nn2") - .setIpcPort(ports[1])) + .setIpcPort(ports[2]) + .setServicePort(ports[3])) .addNN(new MiniDFSNNTopology.NNConf("nn3") - .setIpcPort(ports[2]))); + .setIpcPort(ports[4]) + .setServicePort(ports[5]))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) @@ -219,11 +222,14 @@ public class TestEditLogTailer { MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") .addNN(new MiniDFSNNTopology.NNConf("nn1") - .setIpcPort(ServerSocketUtil.getPort(0, 100))) + .setIpcPort(ServerSocketUtil.getPort(0, 100)) + .setServicePort(ServerSocketUtil.getPort(0, 100))) .addNN(new MiniDFSNNTopology.NNConf("nn2") - .setIpcPort(ServerSocketUtil.getPort(0, 100))) + .setIpcPort(ServerSocketUtil.getPort(0, 100)) + .setServicePort(ServerSocketUtil.getPort(0, 100))) .addNN(new MiniDFSNNTopology.NNConf("nn3") - .setIpcPort(ServerSocketUtil.getPort(0, 100)))); + .setIpcPort(ServerSocketUtil.getPort(0, 100)) + .setServicePort(ServerSocketUtil.getPort(0, 100)))); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java index f1f5793438f..f2274f91e3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.tools; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -179,9 +180,11 @@ public class TestDFSHAAdmin { Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol) .getServiceStatus(); assertEquals(0, runTool("-getAllServiceState")); - assertOutputContains(String.format("%-50s %-10s", (HOST_A + ":" + 12345), + assertOutputContains(String.format("%-50s %-10s", (HOST_A + ":" + + DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT), STANDBY_READY_RESULT.getState())); - assertOutputContains(String.format("%-50s %-10s", (HOST_B + ":" + 12345), + assertOutputContains(String.format("%-50s %-10s", (HOST_B + ":" + + DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT), STANDBY_READY_RESULT.getState())); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java index a21a31d9e0f..d49a3cf8968 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java @@ -77,7 +77,7 @@ public class TestDFSHAAdminMiniCluster { tool.setErrOut(new PrintStream(errOutBytes)); cluster.waitActive(); - nn1Port = cluster.getNameNodePort(0); + nn1Port = cluster.getNameNodeServicePort(0); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java index bbb787e1d75..9943260760c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java @@ -88,9 +88,11 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes { MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") .addNN(new MiniDFSNNTopology.NNConf("nn1") - .setIpcPort(ServerSocketUtil.getPort(10021, 100))) + .setIpcPort(ServerSocketUtil.getPort(10021, 100)) + .setServicePort(ServerSocketUtil.getPort(10025, 100))) .addNN(new MiniDFSNNTopology.NNConf("nn2") - .setIpcPort(ServerSocketUtil.getPort(10022, 100)))); + .setIpcPort(ServerSocketUtil.getPort(10022, 100)) + .setServicePort(ServerSocketUtil.getPort(10026, 100)))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) .numDataNodes(0) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java index 942719e1fc8..36d57a9bc79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -121,13 +122,13 @@ public class TestGetConf { TestType type, HdfsConfiguration conf) throws IOException { switch (type) { case NAMENODE: - return DFSUtil.getNNServiceRpcAddressesForCluster(conf); + return DFSUtil.getNNServiceRpcAddresses(conf); case BACKUP: return DFSUtil.getBackupNodeAddresses(conf); case SECONDARY: return DFSUtil.getSecondaryNameNodeAddresses(conf); case NNRPCADDRESSES: - return DFSUtil.getNNServiceRpcAddressesForCluster(conf); + return DFSUtil.getNNServiceRpcAddresses(conf); } return null; } @@ -278,10 +279,12 @@ public class TestGetConf { public void testNonFederation() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(false); - // Returned namenode address should match default address + // Returned namenode address should match the default service address conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:1000"); - verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000"); - verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000"); + verifyAddresses(conf, TestType.NAMENODE, false, "localhost:" + + DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); + verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:" + + DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); // Returned address should match backupnode RPC address conf.set(DFS_NAMENODE_BACKUP_ADDRESS_KEY,"localhost:1001"); @@ -297,12 +300,14 @@ public class TestGetConf { conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001"); verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000"); verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000"); - - // Returned address should match RPC address + + // Returned namenode address should match the default service address conf = new HdfsConfiguration(); conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001"); - verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1001"); - verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1001"); + verifyAddresses(conf, TestType.NAMENODE, false, "localhost:" + + DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); + verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:" + + DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT); } /** @@ -330,23 +335,6 @@ public class TestGetConf { verifyAddresses(conf, TestType.BACKUP, false, backupAddresses); verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses); verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses); - - // Test to ensure namenode, backup, secondary namenode addresses and - // namenode rpc addresses are returned from federation configuration. - // Returned namenode addresses are based on regular RPC address - // in the absence of service RPC address. - conf = new HdfsConfiguration(false); - setupNameServices(conf, nsCount); - nnAddresses = setupAddress(conf, - DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1000); - backupAddresses = setupAddress(conf, - DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000); - secondaryAddresses = setupAddress(conf, - DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000); - verifyAddresses(conf, TestType.NAMENODE, false, nnAddresses); - verifyAddresses(conf, TestType.BACKUP, false, backupAddresses); - verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses); - verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses); } @Test(timeout=10000)