From d39b25b0e3b3ee75b1c059f88546397b4dda3d07 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Fri, 9 May 2014 02:03:27 +0000
Subject: [PATCH] svn merge -c 1593475 merging from trunk to branch-2 to
 fix:HDFS-6313. WebHdfs may use the wrong NN when configured for multiple HA
 NNs. Contributed by Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1593476 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    | 14 +++++++---
 .../org/apache/hadoop/hdfs/DFSTestUtil.java   | 28 +++++++++++++++++--
 .../hadoop/hdfs/web/TestWebHDFSForHA.java     | 26 +++++++++++++++++
 4 files changed, 65 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 03177447e91..176a8472ce7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -249,6 +249,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6329. WebHdfs does not work if HA is enabled on NN but logical URI is
     not configured. (kihwal)
 
+    HDFS-6313. WebHdfs may use the wrong NN when configured for multiple HA NNs
+    (kihwal)
+
 Release 2.4.0 - 2014-04-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 413ee6922ba..3d9aacc2268 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -101,6 +101,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
 
@@ -1112,10 +1113,10 @@ public class WebHdfsFileSystem extends FileSystem
       Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
           .getHaNnWebHdfsAddresses(conf, scheme);
 
-      for (Map<String, InetSocketAddress> addrs : addresses.values()) {
-        for (InetSocketAddress addr : addrs.values()) {
-          ret.add(addr);
-        }
+      // Extract the entry corresponding to the logical name.
+      Map<String, InetSocketAddress> addrs = addresses.get(uri.getHost());
+      for (InetSocketAddress addr : addrs.values()) {
+        ret.add(addr);
       }
     }
 
@@ -1128,4 +1129,9 @@ public class WebHdfsFileSystem extends FileSystem
     return tokenServiceName == null ? super.getCanonicalServiceName()
         : tokenServiceName.toString();
   }
+
+  @VisibleForTesting
+  InetSocketAddress[] getResolvedNNAddr() {
+    return nnAddrs;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index ce9f0d7ff1a..8ccc8fc82e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -148,16 +148,40 @@ public class DFSTestUtil {
    */
   public static Configuration newHAConfiguration(final String logicalName) {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
+    addHAConfiguration(conf, logicalName);
+    return conf;
+  }
+
+  /**
+   * Add a new HA configuration.
+   */
+  public static void addHAConfiguration(Configuration conf,
+      final String logicalName) {
+    String nsIds = conf.get(DFSConfigKeys.DFS_NAMESERVICES);
+    if (nsIds == null) {
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
+    } else { // append the nsid
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsIds + "," + logicalName);
+    }
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
         logicalName), "nn1,nn2");
     conf.set(DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ""
         + "." + logicalName, ConfiguredFailoverProxyProvider.class.getName());
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    return conf;
   }
 
+  public static void setFakeHttpAddresses(Configuration conf,
+      final String logicalName) {
+    conf.set(DFSUtil.addKeySuffixes(
+        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+        logicalName, "nn1"), "127.0.0.1:12345");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+        logicalName, "nn2"), "127.0.0.1:12346");
+  }
+
   /** class MyFile contains enough information to recreate the contents of
    * a single file.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
index 8ff3398ad4b..772e367f93c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
@@ -156,4 +156,30 @@ public class TestWebHDFSForHA {
       }
     }
   }
+
+  @Test
+  public void testMultipleNamespacesConfigured() throws Exception {
+    Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    MiniDFSCluster cluster = null;
+    WebHdfsFileSystem fs = null;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+          .numDataNodes(1).build();
+
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+
+      cluster.waitActive();
+      DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
+      DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
+
+      fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
+      Assert.assertEquals(2, fs.getResolvedNNAddr().length);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
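The heart of the fix is the second WebHdfsFileSystem.java hunk: before the change, the HA address resolution flattened the NameNode addresses of every configured nameservice into one candidate list, so a client opened against webhdfs://ns1 could fail over to the NameNodes of an unrelated nameservice. The following is a minimal standalone sketch of the old and new lookup logic, not Hadoop code: the nameservice names ("ns1", "ns1remote") and ports are invented for illustration, and haAddresses() merely stands in for DFSUtil.getHaNnWebHdfsAddresses(conf, scheme).

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HaNnResolutionSketch {

  // Stand-in for DFSUtil.getHaNnWebHdfsAddresses(conf, scheme):
  // nameservice id -> (namenode id -> web address). Two HA nameservices
  // are configured, mirroring the new test's LOGICAL_NAME + "remote" setup.
  static Map<String, Map<String, InetSocketAddress>> haAddresses() {
    Map<String, InetSocketAddress> local = new HashMap<>();
    local.put("nn1", new InetSocketAddress("127.0.0.1", 50070));
    local.put("nn2", new InetSocketAddress("127.0.0.1", 50071));
    Map<String, InetSocketAddress> remote = new HashMap<>();
    remote.put("nn1", new InetSocketAddress("127.0.0.1", 12345));
    remote.put("nn2", new InetSocketAddress("127.0.0.1", 12346));
    Map<String, Map<String, InetSocketAddress>> addresses = new HashMap<>();
    addresses.put("ns1", local);
    addresses.put("ns1remote", remote);
    return addresses;
  }

  // Old behavior: every nameservice's NNs land in the candidate list.
  // Note that the URI is never consulted -- that is the bug.
  static List<InetSocketAddress> resolveOld(URI uri) {
    List<InetSocketAddress> ret = new ArrayList<>();
    for (Map<String, InetSocketAddress> addrs : haAddresses().values()) {
      ret.addAll(addrs.values());
    }
    return ret;
  }

  // Fixed behavior: extract only the entry keyed by the URI's host,
  // i.e. the logical nameservice name, as in the patched hunk.
  static List<InetSocketAddress> resolveFixed(URI uri) {
    Map<String, InetSocketAddress> addrs = haAddresses().get(uri.getHost());
    return new ArrayList<>(addrs.values());
  }

  public static void main(String[] args) {
    URI uri = URI.create("webhdfs://ns1");
    System.out.println("old:   " + resolveOld(uri));   // 4 addresses, 2 wrong
    System.out.println("fixed: " + resolveFixed(uri)); // only ns1's 2 NNs
  }
}

For webhdfs://ns1 the old loop yields all four addresses, two of them belonging to the wrong nameservice, while the fixed lookup yields only ns1's two. That is exactly what testMultipleNamespacesConfigured() asserts: after a second, fake nameservice is added to the configuration, getResolvedNNAddr().length must still be 2.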