diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bef4d6a318d..8795ad0de10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -42,6 +42,9 @@ Release 2.6.5 - UNRELEASED
     HDFS-10178. Permanent write failures can happen if pipeline recoveries
     occur for the first packet (kihwal)
 
+    HDFS-9365. Balancer does not work with the HDFS-6376 HA setup. (Tsz Wo
+    Nicholas Sze)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 67880cd991a..73b2556ac24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -962,7 +962,14 @@ public String toString() {
           "nnId=" + namenodeId + ";addr=" + addr + "]";
     }
   }
-  
+
+  /** @return Internal name services specified in the conf. */
+  static Collection<String> getInternalNameServices(Configuration conf) {
+    final Collection<String> ids = conf.getTrimmedStringCollection(
+        DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);
+    return !ids.isEmpty()? ids: getNameServiceIds(conf);
+  }
+
   /**
    * Get a URI for each internal nameservice. If a nameservice is
    * HA-enabled, and the configured failover proxy provider supports logical
@@ -975,8 +982,8 @@ public String toString() {
    * @return a collection of all configured NN URIs, preferring service
    *         addresses
    */
-  public static Collection<URI> getNsServiceRpcUris(Configuration conf) {
-    return getNameServiceUris(conf,
+  public static Collection<URI> getInternalNsRpcUris(Configuration conf) {
+    return getNameServiceUris(conf, getInternalNameServices(conf),
         DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
         DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
   }
@@ -993,8 +1000,8 @@ public static Collection<URI> getNsServiceRpcUris(Configuration conf) {
    *          nameservices
    * @return a collection of all configured NN URIs
    */
-  public static Collection<URI> getNameServiceUris(Configuration conf,
-      String... keys) {
+  static Collection<URI> getNameServiceUris(Configuration conf,
+      Collection<String> nameServices, String... keys) {
     Set<URI> ret = new HashSet<URI>();
 
     // We're passed multiple possible configuration keys for any given NN or HA
@@ -1004,7 +1011,7 @@ public static Collection<URI> getNameServiceUris(Configuration conf,
     // keep track of non-preferred keys here.
     Set<URI> nonPreferredUris = new HashSet<URI>();
 
-    for (String nsId : getNameServiceIds(conf)) {
+    for (String nsId : nameServices) {
       URI nsUri;
       try {
         nsUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId);
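The new DFSUtil helper above is the heart of the fix: getInternalNameServices() prefers the dfs.internal.nameservices list (DFS_INTERNAL_NAMESERVICES_KEY) and falls back to the full dfs.nameservices list only when the internal key is unset. A minimal standalone sketch of that fallback semantics, using plain-Java stand-ins for the Hadoop Configuration calls (the class and helper names here are illustrative, not part of the patch):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.Collections;

    public class InternalNameServicesSketch {

      // Stand-in for conf.getTrimmedStringCollection(DFS_INTERNAL_NAMESERVICES_KEY):
      // an unset or blank key yields an empty collection.
      static Collection<String> trimmedStrings(String raw) {
        if (raw == null || raw.trim().isEmpty()) {
          return Collections.emptyList();
        }
        return Arrays.asList(raw.trim().split("\\s*,\\s*"));
      }

      // Mirrors the patched helper: prefer the explicit internal list,
      // otherwise fall back to every configured nameservice.
      static Collection<String> getInternalNameServices(String internalRaw,
          Collection<String> allNameServices) {
        final Collection<String> ids = trimmedStrings(internalRaw);
        return !ids.isEmpty() ? ids : allNameServices;
      }

      public static void main(String[] args) {
        Collection<String> all = Arrays.asList("ns1", "ns2");
        System.out.println(getInternalNameServices(null, all));  // [ns1, ns2]
        System.out.println(getInternalNameServices("ns1", all)); // [ns1]
      }
    }

With this contract, a cluster whose dfs.nameservices also lists remote or client-side namespaces can pin server-side tools to its own namespaces without changing client configuration.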
Exiting ..."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index 59814af5512..95c8dd76f61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -577,7 +577,7 @@ private static Map> getNameNodePaths(CommandLine line, } else if (line.hasOption("p")) { paths = line.getOptionValues("p"); } - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); if (paths == null || paths.length == 0) { for (URI namenode : namenodes) { map.put(namenode, null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 2a43db2f24a..af23ddbef5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -79,6 +79,8 @@ import org.junit.Before; import org.junit.Test; +import com.google.common.collect.Sets; + public class TestDFSUtil { /** @@ -533,7 +535,7 @@ public void testHANameNodesWithFederation() throws URISyntaxException { ".ns2"; conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha." + "ConfiguredFailoverProxyProvider"); - Collection uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY); + Collection uris = getInternalNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY); assertEquals(2, uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); assertTrue(uris.contains(new URI("hdfs://ns2"))); @@ -616,7 +618,13 @@ public void testSubstituteForWildcardAddress() throws IOException { assertEquals("127.0.0.1:12345", DFSUtil.substituteForWildcardAddress("127.0.0.1:12345", "foo")); } - + + private static Collection getInternalNameServiceUris(Configuration conf, + String... keys) { + final Collection ids = DFSUtil.getInternalNameServices(conf); + return DFSUtil.getNameServiceUris(conf, ids, keys); + } + @Test public void testGetNNUris() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); @@ -659,8 +667,7 @@ public void testGetNNUris() throws Exception { ".ns1"; conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha." + "IPFailoverProxyProvider"); - Collection uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + Collection uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 4, uris.size()); assertTrue("Missing URI for name service ns1", @@ -680,8 +687,7 @@ public void testGetNNUris() throws Exception { conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha." 
+ "ConfiguredFailoverProxyProvider"); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 4, uris.size()); assertTrue("Missing URI for name service ns1", @@ -697,8 +703,7 @@ public void testGetNNUris() throws Exception { conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "viewfs://vfs-name.example.com"); - uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, - DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals(3, uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); @@ -709,8 +714,7 @@ public void testGetNNUris() throws Exception { // entries being returned. conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1"); - uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, - DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals(3, uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); @@ -726,8 +730,7 @@ public void testGetNNUris() throws Exception { conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN1_ADDR); conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR); - uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, - DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals(1, uris.size()); assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR))); @@ -742,7 +745,7 @@ public void testLocalhostReverseLookup() { // it will automatically convert it to hostname HdfsConfiguration conf = new HdfsConfiguration(); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020"); - Collection uris = DFSUtil.getNameServiceUris(conf); + Collection uris = getInternalNameServiceUris(conf); assertEquals(1, uris.size()); for (URI uri : uris) { assertThat(uri.getHost(), not("127.0.0.1")); @@ -929,10 +932,19 @@ public void testGetNNServiceRpcAddressesForNsIds() throws IOException { conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS); + { + Collection internal = DFSUtil.getInternalNameServices(conf); + assertEquals(Sets.newHashSet("nn1"), internal); + + Collection all = DFSUtil.getNameServiceIds(conf); + assertEquals(Sets.newHashSet("nn1", "nn2"), all); + } + Map> nnMap = DFSUtil .getNNServiceRpcAddressesForCluster(conf); assertEquals(1, nnMap.size()); assertTrue(nnMap.containsKey("nn1")); + conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn3"); try { DFSUtil.getNNServiceRpcAddressesForCluster(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 16dbdfd5d0a..fc7c73dd8e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -599,7 +599,7 @@ private void runBalancer(Configuration conf, waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); // start rebalancing - Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); final int r = runBalancer(namenodes, p, conf); if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 16dbdfd5d0a..fc7c73dd8e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -599,7 +599,7 @@ private void runBalancer(Configuration conf,
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
 
     // start rebalancing
-    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
     final int r = runBalancer(namenodes, p, conf);
     if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
         DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) ==0) {
@@ -795,7 +795,7 @@ public void testUnknownDatanode() throws Exception {
         new String[]{RACK0}, null,new long[]{CAPACITY});
     cluster.triggerHeartbeats();
 
-    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
     Set<String> datanodes = new HashSet<String>();
     datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
     Balancer.Parameters p = new Balancer.Parameters(
@@ -1229,7 +1229,7 @@ public void testBalancerWithRamDisk() throws Exception {
           null, null, storageCapacities, null, false, false, false, null);
 
       cluster.triggerHeartbeats();
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
 
       // Run Balancer
       Balancer.Parameters p = new Balancer.Parameters(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index bd9136655f6..67049824eb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -94,7 +94,7 @@ public void testBalancerWithHANameNodes() throws Exception {
       totalCapacity += newNodeCapacity;
       TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
           cluster);
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
       assertEquals(1, namenodes.size());
       assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
       final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index 6ee6e545416..6fd4d5a0104 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -158,7 +158,7 @@ static void runBalancer(Suite s,
     LOG.info("BALANCER 1");
 
     // start rebalancing
-    final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
+    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(s.conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf);
     Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
index 7af3a0e7d7d..d6280a30146 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
@@ -174,7 +174,7 @@ private void runBalancer(Configuration conf,
     waitForHeartBeat(totalUsedSpace, totalCapacity);
 
     // start rebalancing
-    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
     assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
@@ -188,7 +188,7 @@ private void runBalancerCanFinish(Configuration conf,
     waitForHeartBeat(totalUsedSpace, totalCapacity);
 
     // start rebalancing
-    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
     Assert.assertTrue(r == ExitStatus.SUCCESS.getExitCode() ||
         (r == ExitStatus.NO_MOVE_PROGRESS.getExitCode()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 5866c7f7384..5ed46425c7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -37,7 +37,7 @@ public class TestMover {
   static Mover newMover(Configuration conf) throws IOException {
-    final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
     Assert.assertEquals(1, namenodes.size());
 
     final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
@@ -104,7 +104,7 @@ public void testMoverCli() throws Exception {
       }
 
       Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
       Assert.assertEquals(1, namenodes.size());
       Assert.assertEquals(1, movePaths.size());
       URI nn = namenodes.iterator().next();
@@ -112,7 +112,7 @@ public void testMoverCli() throws Exception {
       Assert.assertNull(movePaths.get(nn));
 
       movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
-      namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      namenodes = DFSUtil.getInternalNsRpcUris(conf);
       Assert.assertEquals(1, movePaths.size());
       nn = namenodes.iterator().next();
       Assert.assertTrue(movePaths.containsKey(nn));
@@ -133,7 +133,7 @@ public void testMoverCliWithHAConf() throws Exception {
     try {
       Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
          "-p", "/foo", "/bar");
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
       Assert.assertEquals(1, namenodes.size());
       Assert.assertEquals(1, movePaths.size());
       URI nn = namenodes.iterator().next();
@@ -154,7 +154,7 @@ public void testMoverCliWithFederation() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     DFSTestUtil.setFederatedConfiguration(cluster, conf);
     try {
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
       Assert.assertEquals(3, namenodes.size());
 
       try {
@@ -202,7 +202,7 @@ public void testMoverCliWithFederationHA() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
     try {
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
       Assert.assertEquals(3, namenodes.size());
 
       Iterator<URI> iter = namenodes.iterator();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 0425dc40044..5e7efbe0956 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -270,7 +270,7 @@ void verify(boolean verifyAll) throws Exception {
     }
 
     private void runMover() throws Exception {
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
       Map<URI, List<Path>> nnMap = Maps.newHashMap();
       for (URI nn : namenodes) {
        nnMap.put(nn, null);