svn merge -c 1593475 merging from trunk to branch-2 to fix: HDFS-6313. WebHdfs may use the wrong NN when configured for multiple HA NNs. Contributed by Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1593476 13f79535-47bb-0310-9956-ffa450edef68
Kihwal Lee 2014-05-09 02:03:27 +00:00
parent 26601f37b0
commit d39b25b0e3
4 changed files with 65 additions and 6 deletions
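
Context for the change: the bug only surfaces when a client's configuration declares more than one HA nameservice. Below is a minimal sketch of such a configuration; the nameservice names ns1/ns2 and the hosts are illustrative and not taken from the patch.

    import org.apache.hadoop.conf.Configuration;

    public class MultiHaConfSketch {
      // Two HA nameservices in one client config. Before this fix, a
      // WebHdfs client bound to webhdfs://ns1 collected the NameNode
      // HTTP addresses of BOTH nameservices as failover candidates.
      public static Configuration twoNameservices() {
        Configuration conf = new Configuration();
        conf.set("dfs.nameservices", "ns1,ns2");
        conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
        conf.set("dfs.namenode.http-address.ns1.nn1", "host1:50070");
        conf.set("dfs.namenode.http-address.ns1.nn2", "host2:50070");
        conf.set("dfs.ha.namenodes.ns2", "nn1,nn2");
        conf.set("dfs.namenode.http-address.ns2.nn1", "host3:50070");
        conf.set("dfs.namenode.http-address.ns2.nn2", "host4:50070");
        return conf;
      }
    }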


@@ -249,6 +249,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6329. WebHdfs does not work if HA is enabled on NN but logical URI is
     not configured. (kihwal)
 
+    HDFS-6313. WebHdfs may use the wrong NN when configured for multiple HA NNs
+    (kihwal)
+
 Release 2.4.0 - 2014-04-07
 
   INCOMPATIBLE CHANGES


@@ -101,6 +101,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
@@ -1112,10 +1113,10 @@ public class WebHdfsFileSystem extends FileSystem
       Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
           .getHaNnWebHdfsAddresses(conf, scheme);
 
-      for (Map<String, InetSocketAddress> addrs : addresses.values()) {
-        for (InetSocketAddress addr : addrs.values()) {
-          ret.add(addr);
-        }
+      // Extract the entry corresponding to the logical name.
+      Map<String, InetSocketAddress> addrs = addresses.get(uri.getHost());
+      for (InetSocketAddress addr : addrs.values()) {
+        ret.add(addr);
       }
     }
@@ -1128,4 +1129,9 @@ public class WebHdfsFileSystem extends FileSystem
     return tokenServiceName == null ? super.getCanonicalServiceName()
         : tokenServiceName.toString();
   }
+
+  @VisibleForTesting
+  InetSocketAddress[] getResolvedNNAddr() {
+    return nnAddrs;
+  }
 }
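
The heart of the fix is the narrowed lookup above: instead of flattening every nameservice's NameNode map into the candidate list, only the map keyed by the logical host of the filesystem URI (uri.getHost()) is used. A standalone sketch of the two behaviors, assuming illustrative map contents:

    import java.net.InetSocketAddress;
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class NnSelectionSketch {
      public static void main(String[] args) {
        // addresses: logical nameservice -> (nn id -> NN http address)
        Map<String, Map<String, InetSocketAddress>> addresses = new HashMap<>();

        Map<String, InetSocketAddress> ns1 = new HashMap<>();
        ns1.put("nn1", new InetSocketAddress("host1", 50070));
        ns1.put("nn2", new InetSocketAddress("host2", 50070));
        addresses.put("ns1", ns1);

        Map<String, InetSocketAddress> ns2 = new HashMap<>();
        ns2.put("nn1", new InetSocketAddress("host3", 50070));
        ns2.put("nn2", new InetSocketAddress("host4", 50070));
        addresses.put("ns2", ns2);

        // Old behavior: every NN of every nameservice became a failover
        // candidate, so a client of webhdfs://ns1 could talk to ns2.
        List<InetSocketAddress> before = new ArrayList<>();
        for (Map<String, InetSocketAddress> addrs : addresses.values()) {
          before.addAll(addrs.values());
        }
        System.out.println(before.size()); // 4

        // Fixed behavior: only the entry for the URI's logical host.
        String logicalHost = "ns1"; // uri.getHost() in WebHdfsFileSystem
        List<InetSocketAddress> after =
            new ArrayList<>(addresses.get(logicalHost).values());
        System.out.println(after.size()); // 2
      }
    }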


@@ -148,16 +148,40 @@ public class DFSTestUtil {
    */
   public static Configuration newHAConfiguration(final String logicalName) {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
+    addHAConfiguration(conf, logicalName);
+    return conf;
+  }
+
+  /**
+   * Add a new HA configuration.
+   */
+  public static void addHAConfiguration(Configuration conf,
+      final String logicalName) {
+    String nsIds = conf.get(DFSConfigKeys.DFS_NAMESERVICES);
+    if (nsIds == null) {
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
+    } else { // append the nsid
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsIds + "," + logicalName);
+    }
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
         logicalName), "nn1,nn2");
     conf.set(DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "" +
         "." + logicalName,
         ConfiguredFailoverProxyProvider.class.getName());
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    return conf;
   }
+
+  public static void setFakeHttpAddresses(Configuration conf,
+      final String logicalName) {
+    conf.set(DFSUtil.addKeySuffixes(
+        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+        logicalName, "nn1"), "127.0.0.1:12345");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+        logicalName, "nn2"), "127.0.0.1:12346");
+  }
 
   /** class MyFile contains enough information to recreate the contents of
    * a single file.
    */
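
The reshuffled helpers are what make the new test below possible: addHAConfiguration appends to dfs.nameservices instead of overwriting it, so a second, fake nameservice can be layered onto a live MiniDFSCluster config. A rough usage sketch, assuming hypothetical logical names ns1 and ns1remote:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSTestUtil;

    public class HaConfHelpersSketch {
      public static void main(String[] args) {
        // First nameservice: dfs.nameservices = "ns1"
        Configuration conf = DFSTestUtil.newHAConfiguration("ns1");

        // Second, fake nameservice: dfs.nameservices = "ns1,ns1remote",
        // with nn1/nn2 entries added for ns1remote as well.
        DFSTestUtil.addHAConfiguration(conf, "ns1remote");

        // Point ns1remote's NNs at 127.0.0.1:12345 and 127.0.0.1:12346.
        DFSTestUtil.setFakeHttpAddresses(conf, "ns1remote");

        System.out.println(conf.get("dfs.nameservices")); // ns1,ns1remote
      }
    }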


@@ -156,4 +156,30 @@ public class TestWebHDFSForHA {
       }
     }
   }
+
+  @Test
+  public void testMultipleNamespacesConfigured() throws Exception {
+    Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    MiniDFSCluster cluster = null;
+    WebHdfsFileSystem fs = null;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+              .numDataNodes(1).build();
+
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+
+      cluster.waitActive();
+      DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
+      DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
+
+      fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
+      Assert.assertEquals(2, fs.getResolvedNNAddr().length);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
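
The assertion ties the test back to the bug: with the fake "remote" nameservice added, the config holds four NameNode HTTP addresses in total, but getResolvedNNAddr() must report only the two belonging to LOGICAL_NAME, which is exactly the selection the WebHdfsFileSystem change enforces.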