diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 51ba3ba6ba7..a2d3d5d08e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -772,12 +772,7 @@ static Collection<URI> getNameServiceUris(Configuration conf,
     Set<URI> nonPreferredUris = new HashSet<URI>();
 
     for (String nsId : nameServices) {
-      URI nsUri;
-      try {
-        nsUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId);
-      } catch (URISyntaxException ue) {
-        throw new IllegalArgumentException(ue);
-      }
+      URI nsUri = createUri(HdfsConstants.HDFS_URI_SCHEME, nsId, -1);
       /**
        * Determine whether the logical URI of the name service can be resolved
        * by the configured failover proxy provider. If not, we should try to
@@ -817,7 +812,8 @@ static Collection<URI> getNameServiceUris(Configuration conf,
       for (String key : keys) {
         String addr = conf.get(key);
         if (addr != null) {
-          URI uri = createUri("hdfs", NetUtils.createSocketAddr(addr));
+          URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
+              NetUtils.createSocketAddr(addr));
           if (!uriFound) {
             uriFound = true;
             ret.add(uri);
@@ -835,19 +831,21 @@ static Collection<URI> getNameServiceUris(Configuration conf,
     // nor the rpc-address (which overrides defaultFS) is given.
     if (!uriFound) {
       URI defaultUri = FileSystem.getDefaultUri(conf);
+      if (defaultUri != null) {
+        // checks if defaultUri is ip:port format
+        // and convert it to hostname:port format
+        if (defaultUri.getPort() != -1) {
+          defaultUri = createUri(defaultUri.getScheme(),
+              NetUtils.createSocketAddr(defaultUri.getHost(),
+                  defaultUri.getPort()));
+        }
-
-      // checks if defaultUri is ip:port format
-      // and convert it to hostname:port format
-      if (defaultUri != null && (defaultUri.getPort() != -1)) {
-        defaultUri = createUri(defaultUri.getScheme(),
-            NetUtils.createSocketAddr(defaultUri.getHost(),
-                defaultUri.getPort()));
-      }
+
+        defaultUri = trimUri(defaultUri);
-
-      if (defaultUri != null &&
-          HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
-          !nonPreferredUris.contains(defaultUri)) {
-        ret.add(defaultUri);
+        if (HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
+            !nonPreferredUris.contains(defaultUri)) {
+          ret.add(defaultUri);
+        }
+      }
     }
   }
 
@@ -1180,7 +1178,26 @@ private interface AddressMatcher {
   public static URI createUri(String scheme, InetSocketAddress address) {
     return DFSUtilClient.createUri(scheme, address);
   }
-
+
+  /** Create an URI from scheme, host, and port. */
+  public static URI createUri(String scheme, String host, int port) {
+    try {
+      return new URI(scheme, null, host, port, null, null, null);
+    } catch (URISyntaxException x) {
+      throw new IllegalArgumentException(x.getMessage(), x);
+    }
+  }
+
+  /** Remove unnecessary path from HDFS URI. */
+  static URI trimUri(URI uri) {
+    String path = uri.getPath();
+    if (HdfsConstants.HDFS_URI_SCHEME.equals(uri.getScheme()) &&
+        path != null && !path.isEmpty()) {
+      uri = createUri(uri.getScheme(), uri.getHost(), uri.getPort());
+    }
+    return uri;
+  }
+
   /**
    * Add protobuf based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}
    * @param conf configuration
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 1f2ded6d8a1..c44204a635a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -82,7 +82,11 @@
 import com.google.common.collect.Sets;
 
 public class TestDFSUtil {
-
+
+  static final String NS1_NN_ADDR = "ns1-nn.example.com:9820";
+  static final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820";
+  static final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820";
+
   /**
    * Reset to default UGI settings since some tests change them.
    */
@@ -588,8 +592,6 @@ public void getNameNodeServiceAddr() throws IOException {
   @Test
   public void testGetHaNnHttpAddresses() throws IOException {
     final String LOGICAL_HOST_NAME = "ns1";
-    final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
-    final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
 
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME,
         NS1_NN1_ADDR, NS1_NN2_ADDR);
@@ -638,9 +640,6 @@ private static Collection<URI> getInternalNameServiceUris(Configuration conf,
   public void testGetNNUris() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
 
-    final String NS1_NN_ADDR = "ns1-nn.example.com:8020";
-    final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
-    final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
     final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
     final String NN1_ADDR = "nn.example.com:8020";
     final String NN1_SRVC_ADDR = "nn.example.com:8021";
@@ -649,12 +648,10 @@ public void testGetNNUris() throws Exception {
     conf.set(DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1"), NS1_NN1_ADDR);
-    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "hdfs://" + NN2_ADDR);
 
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
 
     Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 2, uris.size());
     assertTrue("Missing URI for name service ns1",
         uris.contains(new URI("hdfs://" + NS1_NN1_ADDR)));
@@ -669,15 +666,11 @@ public void testGetNNUris() throws Exception {
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
-
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1"), NS1_NN_ADDR);
-
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"), NS2_NN_ADDR);
-
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN1_ADDR);
-
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR);
 
     /**
@@ -715,7 +708,6 @@ public void testGetNNUris() throws Exception {
         + "ConfiguredFailoverProxyProvider");
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 3, uris.size());
     assertTrue("Missing URI for name service ns1",
         uris.contains(new URI("hdfs://ns1")));
@@ -729,7 +721,6 @@ public void testGetNNUris() throws Exception {
         "viewfs://vfs-name.example.com");
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 3, uris.size());
     assertTrue("Missing URI for name service ns1",
         uris.contains(new URI("hdfs://ns1")));
@@ -743,7 +734,6 @@ public void testGetNNUris() throws Exception {
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 3, uris.size());
     assertTrue("Missing URI for name service ns1",
         uris.contains(new URI("hdfs://ns1")));
@@ -757,7 +747,6 @@ public void testGetNNUris() throws Exception {
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
     assertTrue("Missing URI for RPC address (defaultFS)",
         uris.contains(new URI("hdfs://" + NN1_ADDR)));
@@ -767,7 +756,6 @@ public void testGetNNUris() throws Exception {
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN2_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
     assertTrue("Missing URI for RPC address",
         uris.contains(new URI("hdfs://" + NN2_ADDR)));
@@ -779,7 +767,6 @@ public void testGetNNUris() throws Exception {
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
     assertTrue("Missing URI for service ns1", uris.contains(new
         URI("hdfs://" + NN1_ADDR)));
@@ -791,12 +778,41 @@ public void testGetNNUris() throws Exception {
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
     assertTrue("Missing URI for service address",
         uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
   }
 
+  @Test
+  public void testGetNNUris2() throws Exception {
+    // Make sure that an HA URI plus a slash being the default URI doesn't
+    // result in multiple entries being returned.
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_NAMESERVICES, "ns1");
+    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
+        "nn1,nn2");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
+
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1"), NS1_NN_ADDR);
+
+    String proxyProviderKey = HdfsClientConfigKeys.Failover.
+        PROXY_PROVIDER_KEY_PREFIX + ".ns1";
+    conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha."
+        + "ConfiguredFailoverProxyProvider");
+
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1/");
+
+    Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
+
+    assertEquals("Incorrect number of URIs returned", 1, uris.size());
+    assertTrue("Missing URI for name service ns1",
+        uris.contains(new URI("hdfs://ns1")));
+  }
+
   @Test (timeout=15000)
   public void testLocalhostReverseLookup() {
     // 127.0.0.1 -> localhost reverse resolution does not happen on Windows.