diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 22e92df97c4..68c387ee2d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -709,9 +709,16 @@ public class DFSUtil { "nnId=" + namenodeId + ";addr=" + addr + "]"; } } - + + /** @return Internal name services specified in the conf. */ + static Collection<String> getInternalNameServices(Configuration conf) { + final Collection<String> ids = conf.getTrimmedStringCollection( + DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY); + return !ids.isEmpty()? ids: DFSUtilClient.getNameServiceIds(conf); + } + /** - * Get a URI for each configured nameservice. If a nameservice is + * Get a URI for each internal nameservice. If a nameservice is * HA-enabled, then the logical URI of the nameservice is returned. If the * nameservice is not HA-enabled, then a URI corresponding to an RPC address * of the single NN for that nameservice is returned, preferring the service @@ -721,8 +728,8 @@ public class DFSUtil { * @return a collection of all configured NN URIs, preferring service * addresses */ - public static Collection<URI> getNsServiceRpcUris(Configuration conf) { - return getNameServiceUris(conf, + public static Collection<URI> getInternalNsRpcUris(Configuration conf) { + return getNameServiceUris(conf, getInternalNameServices(conf), DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); } @@ -738,8 +745,8 @@ public class DFSUtil { * nameservices * @return a collection of all configured NN URIs */ - public static Collection<URI> getNameServiceUris(Configuration conf, - String... keys) { + static Collection<URI> getNameServiceUris(Configuration conf, + Collection<String> nameServices, String... 
keys) { Set<URI> ret = new HashSet<URI>(); // We're passed multiple possible configuration keys for any given NN or HA @@ -749,7 +756,7 @@ public class DFSUtil { // keep track of non-preferred keys here. Set<URI> nonPreferredUris = new HashSet<URI>(); - for (String nsId : DFSUtilClient.getNameServiceIds(conf)) { + for (String nsId : nameServices) { if (HAUtil.isHAEnabled(conf, nsId)) { // Add the logical URI of the nameservice. try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 4e94b87fab1..c515fb92e0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -743,7 +743,7 @@ public class Balancer { try { checkReplicationPolicyCompatibility(conf); - final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); return Balancer.run(namenodes, parse(args), conf); } catch (IOException e) { System.out.println(e + ". 
Exiting ..."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index 3c632633b54..6c05f2fed3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -645,7 +645,7 @@ public class Mover { } else if (line.hasOption("p")) { paths = line.getOptionValues("p"); } - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); if (paths == null || paths.length == 0) { for (URI namenode : namenodes) { map.put(namenode, null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index f22deafa857..62bbca73892 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -78,6 +78,8 @@ import org.junit.Assume; import org.junit.Before; import org.junit.Test; +import com.google.common.collect.Sets; + public class TestDFSUtil { /** @@ -532,7 +534,7 @@ public class TestDFSUtil { assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf)); assertEquals(null, DFSUtil.getSecondaryNameServiceId(conf)); - Collection uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY); + Collection uris = getInternalNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY); assertEquals(2, uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); assertTrue(uris.contains(new URI("hdfs://ns2"))); @@ -615,7 +617,13 @@ public class TestDFSUtil { assertEquals("127.0.0.1:12345", DFSUtil.substituteForWildcardAddress("127.0.0.1:12345", "foo")); } - + + private static 
Collection getInternalNameServiceUris(Configuration conf, + String... keys) { + final Collection ids = DFSUtil.getInternalNameServices(conf); + return DFSUtil.getNameServiceUris(conf, ids, keys); + } + /** * Test how name service URIs are handled with a variety of configuration * settings @@ -639,8 +647,7 @@ public class TestDFSUtil { conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "hdfs://" + NN2_ADDR); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR); - Collection uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + Collection uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 2, uris.size()); assertTrue("Missing URI for name service ns1", @@ -664,8 +671,7 @@ public class TestDFSUtil { conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 3, uris.size()); assertTrue("Missing URI for name service ns1", @@ -679,8 +685,7 @@ public class TestDFSUtil { conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "viewfs://vfs-name.example.com"); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 3, uris.size()); assertTrue("Missing URI for name service ns1", @@ -694,8 +699,7 @@ public class TestDFSUtil { // entries being returned. 
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1"); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 3, uris.size()); assertTrue("Missing URI for name service ns1", @@ -709,8 +713,7 @@ public class TestDFSUtil { conf = new HdfsConfiguration(); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 1, uris.size()); assertTrue("Missing URI for RPC address (defaultFS)", @@ -720,8 +723,7 @@ public class TestDFSUtil { // and the default FS is given. conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN2_ADDR); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 1, uris.size()); assertTrue("Missing URI for RPC address", @@ -733,8 +735,7 @@ public class TestDFSUtil { // returned. 
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_ADDR); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 1, uris.size()); assertTrue("Missing URI for service ns1", @@ -746,8 +747,7 @@ public class TestDFSUtil { conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR); conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 1, uris.size()); assertTrue("Missing URI for service address", @@ -763,7 +763,7 @@ public class TestDFSUtil { // it will automatically convert it to hostname HdfsConfiguration conf = new HdfsConfiguration(); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020"); - Collection uris = DFSUtil.getNameServiceUris(conf); + Collection uris = getInternalNameServiceUris(conf); assertEquals(1, uris.size()); for (URI uri : uris) { assertThat(uri.getHost(), not("127.0.0.1")); @@ -950,10 +950,19 @@ public class TestDFSUtil { conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS); + { + Collection internal = DFSUtil.getInternalNameServices(conf); + assertEquals(Sets.newHashSet("nn1"), internal); + + Collection all = DFSUtilClient.getNameServiceIds(conf); + assertEquals(Sets.newHashSet("nn1", "nn2"), all); + } + Map> nnMap = DFSUtil .getNNServiceRpcAddressesForCluster(conf); assertEquals(1, nnMap.size()); assertTrue(nnMap.containsKey("nn1")); + conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn3"); try { DFSUtil.getNNServiceRpcAddressesForCluster(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index a24cf9539e4..59a19eaad19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -390,7 +390,7 @@ public class TestBalancer { waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); // start rebalancing - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); } @@ -474,7 +474,7 @@ public class TestBalancer { waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); // start rebalancing - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); BlockPlacementPolicy placementPolicy = cluster.getNamesystem().getBlockManager().getBlockPlacementPolicy(); @@ -782,7 +782,7 @@ public class TestBalancer { waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); // start rebalancing - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); final int r = runBalancer(namenodes, p, conf); if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) ==0) { @@ -979,7 +979,7 @@ public class TestBalancer { new String[]{RACK0}, null,new long[]{CAPACITY}); cluster.triggerHeartbeats(); - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); Set datanodes = new HashSet(); datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName()); 
BalancerParameters.Builder pBuilder = @@ -1446,7 +1446,7 @@ public class TestBalancer { null, null, storageCapacities, null, false, false, false, null); cluster.triggerHeartbeats(); - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); // Run Balancer final BalancerParameters p = BalancerParameters.DEFAULT; @@ -1493,7 +1493,7 @@ public class TestBalancer { // Add another DN with the same capacity, cluster is now unbalanced cluster.startDataNodes(conf, 1, true, null, null); cluster.triggerHeartbeats(); - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); // Run balancer final BalancerParameters p = BalancerParameters.DEFAULT; @@ -1572,7 +1572,7 @@ public class TestBalancer { cluster.triggerHeartbeats(); BalancerParameters p = BalancerParameters.DEFAULT; - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); final int r = Balancer.run(namenodes, p, conf); // Replica in (DN0,SSD) was not moved to (DN1,SSD), because (DN1,DISK) @@ -1690,7 +1690,7 @@ public class TestBalancer { LOG.info("lengths = " + Arrays.toString(lengths) + ", #=" + lengths.length); waitForHeartBeat(totalUsed, 2*capacities[0]*capacities.length, client, cluster); - final Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + final Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); { // run Balancer with min-block-size=50 BalancerParameters.Builder b = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index 1693cf182c0..14441931fe9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -94,7 +94,7 @@ public class TestBalancerWithHANameNodes { totalCapacity += newNodeCapacity; TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); assertEquals(1, namenodes.size()); assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster))); final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index 5676ea43bbd..f01d79e99be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -168,7 +168,7 @@ public class TestBalancerWithMultipleNameNodes { getStorageReports(s); // start rebalancing - final Collection namenodes = DFSUtil.getNsServiceRpcUris(s.conf); + final Collection namenodes = DFSUtil.getInternalNsRpcUris(s.conf); final int r = Balancer.run(namenodes, s.parameters, s.conf); Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java index bfa2835ac7f..a6732c7c978 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java @@ -174,7 +174,7 @@ public class TestBalancerWithNodeGroup { waitForHeartBeat(totalUsedSpace, totalCapacity); // start rebalancing - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); assertEquals(ExitStatus.SUCCESS.getExitCode(), r); @@ -188,7 +188,7 @@ public class TestBalancerWithNodeGroup { waitForHeartBeat(totalUsedSpace, totalCapacity); // start rebalancing - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); Assert.assertTrue(r == ExitStatus.SUCCESS.getExitCode() || (r == ExitStatus.NO_MOVE_PROGRESS.getExitCode())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index d3d814c10d2..1c47f432bfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -63,7 +63,7 @@ public class TestMover { } static Mover newMover(Configuration conf) throws IOException { - final Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + final Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(1, namenodes.size()); Map> nnMap = Maps.newHashMap(); for (URI nn : namenodes) { @@ -182,7 +182,7 @@ public class TestMover { } Map> movePaths = Mover.Cli.getNameNodePathsToMove(conf); - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(1, namenodes.size()); 
Assert.assertEquals(1, movePaths.size()); URI nn = namenodes.iterator().next(); @@ -190,7 +190,7 @@ public class TestMover { Assert.assertNull(movePaths.get(nn)); movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar"); - namenodes = DFSUtil.getNsServiceRpcUris(conf); + namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(1, movePaths.size()); nn = namenodes.iterator().next(); Assert.assertTrue(movePaths.containsKey(nn)); @@ -211,7 +211,7 @@ public class TestMover { try { Map> movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar"); - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(1, namenodes.size()); Assert.assertEquals(1, movePaths.size()); URI nn = namenodes.iterator().next(); @@ -232,7 +232,7 @@ public class TestMover { final Configuration conf = new HdfsConfiguration(); DFSTestUtil.setFederatedConfiguration(cluster, conf); try { - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(3, namenodes.size()); try { @@ -280,7 +280,7 @@ public class TestMover { final Configuration conf = new HdfsConfiguration(); DFSTestUtil.setFederatedHAConfiguration(cluster, conf); try { - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(3, namenodes.size()); Iterator iter = namenodes.iterator(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java index 9565397e9ec..549dbc82eea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java 
@@ -270,7 +270,7 @@ public class TestStorageMover { } private void runMover(ExitStatus expectedExitCode) throws Exception { - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); Map<URI, List<Path>> nnMap = Maps.newHashMap(); for (URI nn : namenodes) { nnMap.put(nn, null);