From 7e10509fe43033ac3218b8bf9ef5eeac03907e4f Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Wed, 20 Nov 2013 22:00:32 +0000
Subject: [PATCH] HDFS-3987. Merge change r1543962 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1543967 13f79535-47bb-0310-9956-ffa450edef68
---
 .../client/KerberosAuthenticator.java         |  8 ++-
 .../org/apache/hadoop/http/HttpServer.java    |  4 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java  | 40 +++++++----
 .../hadoop/hdfs/server/namenode/NameNode.java |  4 ++
 .../web/resources/NamenodeWebHdfsMethods.java |  5 +-
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java   | 66 +++++++++++++++++++
 .../apache/hadoop/hdfs/web/TokenAspect.java   |  5 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    | 40 +++++++++--
 .../services/org.apache.hadoop.fs.FileSystem  |  1 +
 .../org/apache/hadoop/fs/TestSymlinkHdfs.java |  2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java     |  7 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java   |  4 +-
 .../TestDelegationTokenForProxyUser.java      |  2 +-
 .../hdfs/server/namenode/TestAuditLogs.java   |  6 +-
 .../hadoop/hdfs/web/TestHttpsFileSystem.java  | 12 ++++
 .../apache/hadoop/hdfs/web/TestWebHDFS.java   | 13 ++--
 .../web/TestWebHdfsFileSystemContract.java    |  2 +-
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java  | 15 ++---
 .../hadoop/hdfs/web/WebHdfsTestUtil.java      | 28 ++++++--
 20 files changed, 211 insertions(+), 55 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java

diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 006cb35b1ea..29f7c5d62e8 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -196,7 +196,13 @@ public class KerberosAuthenticator implements Authenticator {
         doSpnegoSequence(token);
       } else {
         LOG.debug("Using fallback authenticator sequence.");
-        getFallBackAuthenticator().authenticate(url, token);
+        Authenticator auth = getFallBackAuthenticator();
+        // Make sure that the fallback authenticator has the same
+        // ConnectionConfigurator, since the method might be overridden.
+        // Otherwise the fallback authenticator might not have the information
+        // to make the connection (e.g., SSL certificates).
+        auth.setConnectionConfigurator(connConfigurator);
+        auth.authenticate(url, token);
       }
     }
   }
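The hunk above matters when SPNEGO is not required and the client falls back to the pseudo authenticator: without the fix, only the Kerberos path received the ConnectionConfigurator. For illustration, a minimal client-side sketch of the pattern the fix relies on is shown below, assuming the hadoop-auth client API on this branch; the host name and the trivial configurator are hypothetical and are not part of this patch.

    // Illustrative sketch only; host name and configurator are hypothetical.
    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
    import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

    public class SpnegoOverSslSketch {
      public static void main(String[] args) throws Exception {
        // A real configurator would install SSL socket factories and hostname
        // verifiers; this one only sets timeouts.
        ConnectionConfigurator connConfigurator = new ConnectionConfigurator() {
          @Override
          public HttpURLConnection configure(HttpURLConnection conn) {
            conn.setConnectTimeout(60 * 1000);
            conn.setReadTimeout(60 * 1000);
            return conn;
          }
        };

        KerberosAuthenticator authenticator = new KerberosAuthenticator();
        // With the change above, the configurator set here is also handed to
        // the fallback authenticator instead of being silently dropped.
        authenticator.setConnectionConfigurator(connConfigurator);

        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        HttpURLConnection conn = new AuthenticatedURL(authenticator, connConfigurator)
            .openConnection(new URL("https://nn.example.com:50470/webhdfs/v1/?op=GETHOMEDIRECTORY"), token);
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }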
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index b290cef53b7..e563e3de255 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -471,7 +471,9 @@ public class HttpServer implements FilterContainer {
       if (conf.getBoolean(
           CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
           CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
-        logContext.getInitParams().put(
+        @SuppressWarnings("unchecked")
+        Map<String, String> params = logContext.getInitParams();
+        params.put(
             "org.mortbay.jetty.servlet.Default.aliases", "true");
       }
       logContext.setDisplayName("logs");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c3d222da870..382bb32e12f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -23,6 +23,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. (Haohui
     Mai via jing9)
 
+    HDFS-3987. Support webhdfs over HTTPS. (Haohui Mai via jing9)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
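The DFSUtil change that follows keys the HA address lookup off the file system scheme: "webhdfs" reads the dfs.namenode.http-address family, "swebhdfs" the dfs.namenode.https-address family. A small hypothetical sketch of that lookup, with made-up host names and the standard HA keys, is shown here; it is an illustration, not part of the patch.

    // Hypothetical illustration of the scheme-based address lookup added below.
    import java.net.InetSocketAddress;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class HaWebHdfsAddressSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.nameservices", "ns1");
        conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
        conf.set("dfs.namenode.http-address.ns1.nn1", "nn1.example.com:50070");
        conf.set("dfs.namenode.http-address.ns1.nn2", "nn2.example.com:50070");
        conf.set("dfs.namenode.https-address.ns1.nn1", "nn1.example.com:50470");
        conf.set("dfs.namenode.https-address.ns1.nn2", "nn2.example.com:50470");

        // "webhdfs" consults the http addresses, "swebhdfs" the https ones.
        Map<String, Map<String, InetSocketAddress>> http =
            DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
        Map<String, Map<String, InetSocketAddress>> https =
            DFSUtil.getHaNnWebHdfsAddresses(conf, "swebhdfs");
        System.out.println(http.get("ns1") + " / " + https.get("ns1"));
      }
    }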
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 6b1d31e3568..85035baef9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -76,9 +76,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -605,12 +604,19 @@ public class DFSUtil {
    * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
    * the configuration.
    *
-   * @param conf configuration
    * @return list of InetSocketAddresses
    */
-  public static Map<String, Map<String, InetSocketAddress>> getHaNnHttpAddresses(
-      Configuration conf) {
-    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
+      Configuration conf, String scheme) {
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
   }
 
   /**
@@ -619,18 +625,28 @@ public class DFSUtil {
    * cluster, the resolver further resolves the logical name (i.e., the authority
    * in the URL) into real namenode addresses.
    */
-  public static InetSocketAddress[] resolve(URI uri, int schemeDefaultPort,
-      Configuration conf) throws IOException {
+  public static InetSocketAddress[] resolveWebHdfsUri(URI uri, Configuration conf)
+      throws IOException {
+    int defaultPort;
+    String scheme = uri.getScheme();
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
+
     ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
 
     if (!HAUtil.isLogicalUri(conf, uri)) {
       InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
-          schemeDefaultPort);
+          defaultPort);
       ret.add(addr);
     } else {
       Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
-          .getHaNnHttpAddresses(conf);
+          .getHaNnWebHdfsAddresses(conf, scheme);
 
       for (Map<String, InetSocketAddress> addrs : addresses.values()) {
         for (InetSocketAddress addr : addrs.values()) {
@@ -1391,4 +1407,4 @@ public class DFSUtil {
     return (value == null || value.isEmpty()) ?
        defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 790347e97dc..d7a64a4136b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -801,6 +801,10 @@ public class NameNode implements NameNodeStatusMXBean {
     return httpServer.getHttpAddress();
   }
 
+  /**
+   * @return NameNode HTTPS address, used by the Web UI, image transfer,
+   *    and HTTP-based file system clients like Hftp and WebHDFS
+   */
   public InetSocketAddress getHttpsAddress() {
     return httpServer.getHttpsAddress();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 288455bd2b3..891b0c96e0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.ParamFilter;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -96,6 +97,7 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
@@ -210,7 +212,8 @@ public class NamenodeWebHdfsMethods {
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
-    t.setKind(WebHdfsFileSystem.TOKEN_KIND);
+    Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND : SWebHdfsFileSystem.TOKEN_KIND;
+    t.setKind(kind);
     return t;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
new file mode 100644
index 00000000000..bce7b7c6cdb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.io.Text;
+
+public class SWebHdfsFileSystem extends WebHdfsFileSystem {
+
+  public static final Text TOKEN_KIND = new Text("SWEBHDFS delegation");
+  public static final String SCHEME = "swebhdfs";
+
+  @Override
+  public String getScheme() {
+    return SCHEME;
+  }
+
+  @Override
+  protected String getTransportScheme() {
+    return "https";
+  }
+
+  @Override
+  protected synchronized void initializeTokenAspect() {
+    tokenAspect = new TokenAspect<SWebHdfsFileSystem>(this, TOKEN_KIND);
+  }
+
+  @Override
+  protected void initializeConnectionFactory(Configuration conf)
+      throws IOException {
+    connectionFactory = new URLConnectionFactory(
+        URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
+    try {
+      connectionFactory.setConnConfigurator(URLConnectionFactory
+          .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+              conf));
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  protected int getDefaultPort() {
+    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
index 3bf88f3e6ab..20dba36edda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
@@ -58,7 +58,8 @@ final class TokenAspect {
     public boolean handleKind(Text kind) {
       return kind.equals(HftpFileSystem.TOKEN_KIND)
           || kind.equals(HsftpFileSystem.TOKEN_KIND)
-          || kind.equals(WebHdfsFileSystem.TOKEN_KIND);
+          || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
+          || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
     }
 
     @Override
@@ -83,6 +84,8 @@ final class TokenAspect {
         uri = DFSUtil.createUri(HsftpFileSystem.SCHEME, address);
       } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
         uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address);
+      } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
+        uri = DFSUtil.createUri(SWebHdfsFileSystem.SCHEME, address);
       } else {
         throw new IllegalArgumentException("Unsupported scheme");
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index a26c892ce20..87abf3f83b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
-import org.apache.hadoop.hdfs.web.TokenAspect.DTSelecorByKind;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -99,7 +98,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
 
@@ -119,8 +117,7 @@ public class WebHdfsFileSystem extends FileSystem
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
 
-  protected TokenAspect<WebHdfsFileSystem> tokenAspect = new TokenAspect<WebHdfsFileSystem>(
-      this, TOKEN_KIND);
+  protected TokenAspect<WebHdfsFileSystem> tokenAspect;
 
   private UserGroupInformation ugi;
   private URI uri;
@@ -141,17 +138,44 @@ public class WebHdfsFileSystem extends FileSystem
     return SCHEME;
   }
 
+  /**
+   * Return the underlying transport protocol (http / https).
+   */
+  protected String getTransportScheme() {
+    return "http";
+  }
+
+  /**
+   * Initialize tokenAspect. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected synchronized void initializeTokenAspect() {
+    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
+  }
+
+  /**
+   * Initialize connectionFactory. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected void initializeConnectionFactory(Configuration conf)
+      throws IOException {
+    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
+    initializeTokenAspect();
+    initializeConnectionFactory(conf);
+
     ugi = UserGroupInformation.getCurrentUser();
 
     try {
       this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
           null, null);
-      this.nnAddrs = DFSUtil.resolve(this.uri, getDefaultPort(), conf);
+      this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
@@ -343,7 +367,7 @@ public class WebHdfsFileSystem extends FileSystem
    */
   private URL getNamenodeURL(String path, String query) throws IOException {
     InetSocketAddress nnAddr = getCurrentNNAddr();
-    final URL url = new URL("http", nnAddr.getHostName(),
+    final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
           nnAddr.getPort(), path + '?' + query);
     if (LOG.isTraceEnabled()) {
       LOG.trace("url=" + url);
@@ -841,7 +865,9 @@ public class WebHdfsFileSystem extends FileSystem
   @Override
   public void close() throws IOException {
     super.close();
-    tokenAspect.removeRenewAction();
+    synchronized (this) {
+      tokenAspect.removeRenewAction();
+    }
   }
 
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
index aa183204ee3..78fd925f1bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -17,3 +17,4 @@ org.apache.hadoop.hdfs.DistributedFileSystem
 org.apache.hadoop.hdfs.web.HftpFileSystem
 org.apache.hadoop.hdfs.web.HsftpFileSystem
 org.apache.hadoop.hdfs.web.WebHdfsFileSystem
+org.apache.hadoop.hdfs.web.SWebHdfsFileSystem
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
index f6fd6b8487d..5ef46b01f06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
@@ -89,7 +89,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     cluster = new MiniDFSCluster.Builder(conf).build();
-    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     dfs = cluster.getFileSystem();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 4e229e0ed81..e8b652a8e07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -834,8 +835,8 @@ public class TestDFSClientRetries {
     try {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
-      final FileSystem fs = isWebHDFS?
-          WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
+      final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsFileSystem.SCHEME) : dfs;
       final URI uri = dfs.getUri();
       assertTrue(HdfsUtils.isHealthy(uri));
 
@@ -1039,7 +1040,7 @@ public class TestDFSClientRetries {
     final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
       username, new String[]{"supergroup"});
 
-    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
         : DFSTestUtil.getFileSystemAs(ugi, conf);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index c0b7daaf01a..e3d0e0213f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -556,7 +556,7 @@ public class TestDFSUtil {
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
     Map<String, Map<String, InetSocketAddress>> map =
-        DFSUtil.getHaNnHttpAddresses(conf);
+        DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
 
     assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
     assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
@@ -574,7 +574,7 @@ public class TestDFSUtil {
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
     URI uri = new URI("webhdfs://ns1");
     assertTrue(HAUtil.isLogicalUri(conf, uri));
-    InetSocketAddress[] addrs = DFSUtil.resolve(uri, DEFAULT_PORT, conf);
+    InetSocketAddress[] addrs = DFSUtil.resolveWebHdfsUri(uri, conf);
     assertArrayEquals(new InetSocketAddress[] {
       new InetSocketAddress(NS1_NN1_HOST, DEFAULT_PORT),
       new InetSocketAddress(NS1_NN2_HOST, DEFAULT_PORT),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
index a5a9521b0c4..05a89042577 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
@@ -147,7 +147,7 @@ public class TestDelegationTokenForProxyUser {
   public void testWebHdfsDoAs() throws Exception {
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
-    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config);
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsFileSystem.SCHEME);
 
     final Path root = new Path("/");
     cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 175dffd65df..530554c369f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -163,7 +163,7 @@ public class TestAuditLogs {
 
     setupAuditLogs();
 
-    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
     InputStream istream = webfs.open(file);
     int val = istream.read();
     istream.close();
@@ -182,7 +182,7 @@ public class TestAuditLogs {
 
     setupAuditLogs();
 
-    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
     FileStatus st = webfs.getFileStatus(file);
 
     verifyAuditLogs(true);
@@ -222,7 +222,7 @@ public class TestAuditLogs {
 
     setupAuditLogs();
     try {
-      WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+      WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
       InputStream istream = webfs.open(file);
       int val = istream.read();
       fail("open+read must not succeed, got " + val);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
index e1a7f18b9a2..8d2de80fb32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
@@ -65,6 +65,7 @@ public class TestHttpsFileSystem {
     cluster.getFileSystem().create(new Path("/test")).close();
     InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
     nnAddr = addr.getHostName() + ":" + addr.getPort();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
   }
 
   @AfterClass
@@ -80,4 +81,15 @@ public class TestHttpsFileSystem {
     Assert.assertTrue(fs.exists(new Path("/test")));
     fs.close();
   }
+
+  @Test
+  public void testSWebHdfsFileSystem() throws Exception {
+    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
+    final Path f = new Path("/testswebhdfs");
+    FSDataOutputStream os = fs.create(f);
+    os.write(23);
+    os.close();
+    Assert.assertTrue(fs.exists(f));
+    fs.close();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 3998fdc3c93..9d0b473c5bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -101,7 +101,7 @@ public class TestWebHDFS {
     try {
       cluster.waitActive();
 
-      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
       final Path dir = new Path("/test/largeFile");
       Assert.assertTrue(fs.mkdirs(dir));
 
@@ -229,9 +229,9 @@ public class TestWebHDFS {
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
       cluster.waitActive();
-      WebHdfsTestUtil.getWebHdfsFileSystem(conf).setPermission(
-          new Path("/"),
-          new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
+      WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME)
+          .setPermission(new Path("/"),
+              new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
 
       // trick the NN into not believing it's not the superuser so we can
       // tell if the correct user is used by listStatus
@@ -243,8 +243,9 @@ public class TestWebHDFS {
         .doAs(new PrivilegedExceptionAction<Void>() {
           @Override
           public Void run() throws IOException, URISyntaxException {
-            FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
-            Path d = new Path("/my-dir");
+            FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+                WebHdfsFileSystem.SCHEME);
+            Path d = new Path("/my-dir");
             Assert.assertTrue(fs.mkdirs(d));
             for (int i=0; i < listLimit*3; i++) {
               Path p = new Path(d, "file-"+i);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index 4181ce60376..687eddc8da3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -82,7 +82,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();
     ugi = UserGroupInformation.createUserForTesting(
         current.getShortUserName() + "x", new String[]{"user"});
-    fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf);
+    fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
 
     defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 9a85cf97cbe..41e0b928689 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -18,35 +18,32 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
-import java.net.SocketAddress;
 import java.net.SocketTimeoutException;
 import java.nio.channels.SocketChannel;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * This test suite checks that WebHdfsFileSystem sets connection timeouts and
@@ -77,7 +74,7 @@ public class TestWebHdfsTimeouts {
     serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
     nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort());
-    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     fs.connectionFactory = connectionFactory;
     clients = new ArrayList<SocketChannel>();
     serverThread = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
index 9b512e04480..2f092cc2cf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
@@ -46,20 +46,36 @@ public class WebHdfsTestUtil {
     return conf;
   }
 
-  public static WebHdfsFileSystem getWebHdfsFileSystem(final Configuration conf
-      ) throws IOException, URISyntaxException {
-    final String uri = WebHdfsFileSystem.SCHEME + "://"
-        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+  public static WebHdfsFileSystem getWebHdfsFileSystem(
+      final Configuration conf, String scheme) throws IOException,
+      URISyntaxException {
+    final String uri;
+
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      uri = WebHdfsFileSystem.SCHEME + "://"
+          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      uri = SWebHdfsFileSystem.SCHEME + "://"
+          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("unknown scheme: " + scheme);
+    }
     return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
   }
 
   public static WebHdfsFileSystem getWebHdfsFileSystemAs(
-      final UserGroupInformation ugi, final Configuration conf
+      final UserGroupInformation ugi, final Configuration conf
+      ) throws IOException, InterruptedException {
+    return getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
+  }
+
+  public static WebHdfsFileSystem getWebHdfsFileSystemAs(
+      final UserGroupInformation ugi, final Configuration conf, final String scheme
      ) throws IOException, InterruptedException {
    return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
      @Override
      public WebHdfsFileSystem run() throws Exception {
-        return getWebHdfsFileSystem(conf);
+        return getWebHdfsFileSystem(conf, scheme);
      }
    });
  }
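End to end, the new scheme is exercised through the standard FileSystem API. A minimal hedged usage sketch follows, assuming an insecure test cluster; the host name is hypothetical and the SSL client truststore is assumed to be configured separately (ssl-client.xml). It is an illustration, not part of the patch.

    // Illustrative swebhdfs client usage.
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SWebHdfsClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // 50470 is the NameNode's default HTTPS port on this branch.
        FileSystem fs = FileSystem.get(
            URI.create("swebhdfs://nn.example.com:50470/"), conf);
        for (FileStatus stat : fs.listStatus(new Path("/"))) {
          System.out.println(stat.getPath());
        }
        fs.close();
      }
    }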