From 53f2a1efe5778c5a9e20f5346716fe96639678b6 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Sat, 16 Nov 2013 01:06:27 +0000
Subject: [PATCH] HDFS-5502. Merge change r1542438 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1542439 13f79535-47bb-0310-9956-ffa450edef68
---
 .../org/apache/hadoop/http/HttpServer.java    |  14 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   2 +
 .../hadoop/hdfs/server/namenode/NameNode.java |   4 +
 .../server/namenode/NameNodeHttpServer.java   |   7 +-
 .../hdfs/tools/DelegationTokenFetcher.java    |   4 +-
 .../hadoop/hdfs/web/HftpFileSystem.java       |  23 +-
 .../hadoop/hdfs/web/HsftpFileSystem.java      | 161 +-----------
 .../apache/hadoop/hdfs/web/TokenAspect.java   |   4 +
 .../hadoop/hdfs/web/URLConnectionFactory.java |  35 +++
 .../hadoop/hdfs/TestNameNodeHttpServer.java   |  39 ---
 .../hdfs/web/TestHftpDelegationToken.java     | 248 ++++--------------
 .../hadoop/hdfs/web/TestHftpFileSystem.java   |  40 +--
 .../hadoop/hdfs/web/TestHttpsFileSystem.java  |  83 ++++++
 .../src/test/resources/ssl-client.xml         |  26 --
 14 files changed, 252 insertions(+), 438 deletions(-)
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index 22346fab999..b290cef53b7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -84,6 +84,7 @@ import org.mortbay.jetty.webapp.WebAppContext;
 import org.mortbay.thread.QueuedThreadPool;
 import org.mortbay.util.MultiException;
 
+import com.google.common.base.Preconditions;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
 
 /**
@@ -715,6 +716,19 @@ public class HttpServer implements FilterContainer {
     return webServer.getConnectors()[0].getLocalPort();
   }
 
+  /**
+   * Get the port that corresponds to a particular connector. In the case of
+   * HDFS, the second connector corresponds to the HTTPS connector.
+   *
+   * @return the corresponding port for the connector, or -1 if there's no such
+   *         connector.
+   */
+  public int getConnectorPort(int index) {
+    Preconditions.checkArgument(index >= 0);
+    return index < webServer.getConnectors().length ?
+        webServer.getConnectors()[index].getLocalPort() : -1;
+  }
+
   /**
    * Set the min, max number of worker threads (simultaneous connections).
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4b83abe5065..ddc2f2f81c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -186,6 +186,8 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5438. Flaws in block report processing can cause data loss. (kihwal)
 
+    HDFS-5502. Fix HTTPS support in HsftpFileSystem. (Haohui Mai via jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 591727aad4d..f91492f12a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -793,6 +793,10 @@ public class NameNode implements NameNodeStatusMXBean {
     return httpServer.getHttpAddress();
   }
 
+  public InetSocketAddress getHttpsAddress() {
+    return httpServer.getHttpsAddress();
+  }
+
   /**
    * Verify that configured directories exist, then
    * Interactively confirm that formatting is desired
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index b5afd5f3dce..07762ed4d70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -119,7 +119,12 @@ public class NameNodeHttpServer {
     httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
     setupServlets(httpServer, conf);
     httpServer.start();
-    httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort());
+    httpAddress = new InetSocketAddress(bindAddress.getAddress(),
+        httpServer.getPort());
+    if (certSSL) {
+      httpsAddress = new InetSocketAddress(bindAddress.getAddress(),
+          httpServer.getConnectorPort(1));
+    }
   }
 
   private Map getAuthFilterParams(Configuration conf)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index 107542add21..2517b4af3ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -218,6 +218,8 @@ public class DelegationTokenFetcher {
           .append(renewer);
     }
 
+    boolean isHttps = nnUri.getScheme().equals("https");
+
     HttpURLConnection conn = null;
     DataInputStream dis = null;
     InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnUri
@@ -234,7 +236,7 @@ public class DelegationTokenFetcher {
       dis = new DataInputStream(in);
       ts.readFields(dis);
       for (Token token : ts.getAllTokens()) {
-        token.setKind(HftpFileSystem.TOKEN_KIND);
+        token.setKind(isHttps ? HsftpFileSystem.TOKEN_KIND : HftpFileSystem.TOKEN_KIND);
         SecurityUtil.setTokenService(token, serviceAddr);
       }
       return ts;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
index 8909c9d8f64..ff882119e1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
@@ -86,7 +86,7 @@ public class HftpFileSystem extends FileSystem
     HttpURLConnection.setFollowRedirects(true);
   }
 
-  URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  URLConnectionFactory connectionFactory;
 
   public static final Text TOKEN_KIND = new Text("HFTP delegation");
 
@@ -98,7 +98,7 @@ public class HftpFileSystem extends FileSystem
   public static final String HFTP_TIMEZONE = "UTC";
   public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
 
-  private TokenAspect tokenAspect = new TokenAspect(this, TOKEN_KIND);
+  protected TokenAspect tokenAspect;
   private Token delegationToken;
   private Token renewToken;
 
@@ -172,6 +172,16 @@ public class HftpFileSystem extends FileSystem
     return SCHEME;
   }
 
+  /**
+   * Initialize connectionFactory and tokenAspect. This function is intended to
+   * be overridden by HsFtpFileSystem.
+   */
+  protected void initConnectionFactoryAndTokenAspect(Configuration conf)
+      throws IOException {
+    tokenAspect = new TokenAspect(this, TOKEN_KIND);
+    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  }
+
   @Override
   public void initialize(final URI name, final Configuration conf)
       throws IOException {
@@ -179,6 +189,7 @@ public class HftpFileSystem extends FileSystem
     setConf(conf);
     this.ugi = UserGroupInformation.getCurrentUser();
     this.nnUri = getNamenodeUri(name);
+
     try {
       this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
           null, null, null);
@@ -186,6 +197,7 @@ public class HftpFileSystem extends FileSystem
       throw new IllegalArgumentException(e);
     }
 
+    initConnectionFactoryAndTokenAspect(conf);
     if (UserGroupInformation.isSecurityEnabled()) {
       tokenAspect.initDelegationToken(ugi);
     }
@@ -212,8 +224,8 @@ public class HftpFileSystem extends FileSystem
      *
      * For other operations, however, the client has to send a
      * HDFS_DELEGATION_KIND token over the wire so that it can talk to Hadoop
-     * 0.20.3 clusters. Later releases fix this problem. See HDFS-5440 for more
-     * details.
+     * 0.20.203 clusters. Later releases fix this problem. See HDFS-5440 for
+     * more details.
      */
     renewToken = token;
     delegationToken = new Token(token);
@@ -229,13 +241,12 @@ public class HftpFileSystem extends FileSystem
     return ugi.doAs(new PrivilegedExceptionAction>() {
       @Override
       public Token run() throws IOException {
-        final String nnHttpUrl = nnUri.toString();
         Credentials c;
         try {
           c = DelegationTokenFetcher.getDTfromRemote(connectionFactory, nnUri, renewer);
         } catch (IOException e) {
           if (e.getCause() instanceof ConnectException) {
-            LOG.warn("Couldn't connect to " + nnHttpUrl +
+            LOG.warn("Couldn't connect to " + nnUri +
                 ", assuming security is disabled");
             return null;
           }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java
index 695616b9601..e64becd1813 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java
@@ -18,31 +18,14 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import java.io.FileInputStream;
 import java.io.IOException;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URL;
-import java.security.KeyStore;
-import java.security.cert.X509Certificate;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.KeyManager;
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLSession;
-import javax.net.ssl.TrustManager;
-import javax.net.ssl.TrustManagerFactory;
-import javax.net.ssl.X509TrustManager;
+import java.security.GeneralSecurityException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.util.Time;
+import org.apache.hadoop.io.Text;
 
 /**
  * An implementation of a protocol for accessing filesystems over HTTPS. The
@@ -55,9 +38,8 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class HsftpFileSystem extends HftpFileSystem {
-
-  private static final long MM_SECONDS_PER_DAY = 1000 * 60 * 60 * 24;
-  private volatile int ExpWarnDays = 0;
+  public static final Text TOKEN_KIND = new Text("HSFTP delegation");
+  public static final String SCHEME = "hsftp";
 
   /**
    * Return the protocol scheme for the FileSystem.
@@ -67,7 +49,7 @@ public class HsftpFileSystem extends HftpFileSystem { */ @Override public String getScheme() { - return "hsftp"; + return SCHEME; } /** @@ -79,66 +61,17 @@ public class HsftpFileSystem extends HftpFileSystem { } @Override - public void initialize(URI name, Configuration conf) throws IOException { - super.initialize(name, conf); - setupSsl(conf); - ExpWarnDays = conf.getInt("ssl.expiration.warn.days", 30); - } + protected void initConnectionFactoryAndTokenAspect(Configuration conf) throws IOException { + tokenAspect = new TokenAspect(this, TOKEN_KIND); - /** - * Set up SSL resources - * - * @throws IOException - */ - private static void setupSsl(Configuration conf) throws IOException { - Configuration sslConf = new HdfsConfiguration(false); - sslConf.addResource(conf.get(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, - DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT)); - FileInputStream fis = null; + connectionFactory = new URLConnectionFactory( + URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT); try { - SSLContext sc = SSLContext.getInstance("SSL"); - KeyManager[] kms = null; - TrustManager[] tms = null; - if (sslConf.get("ssl.client.keystore.location") != null) { - // initialize default key manager with keystore file and pass - KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509"); - KeyStore ks = KeyStore.getInstance(sslConf.get( - "ssl.client.keystore.type", "JKS")); - char[] ksPass = sslConf.get("ssl.client.keystore.password", "changeit") - .toCharArray(); - fis = new FileInputStream(sslConf.get("ssl.client.keystore.location", - "keystore.jks")); - ks.load(fis, ksPass); - kmf.init(ks, sslConf.get("ssl.client.keystore.keypassword", "changeit") - .toCharArray()); - kms = kmf.getKeyManagers(); - fis.close(); - fis = null; - } - // initialize default trust manager with truststore file and pass - if (sslConf.getBoolean("ssl.client.do.not.authenticate.server", false)) { - // by pass trustmanager validation - tms = new DummyTrustManager[] { new DummyTrustManager() }; - } else { - TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX"); - KeyStore ts = KeyStore.getInstance(sslConf.get( - "ssl.client.truststore.type", "JKS")); - char[] tsPass = sslConf.get("ssl.client.truststore.password", - "changeit").toCharArray(); - fis = new FileInputStream(sslConf.get("ssl.client.truststore.location", - "truststore.jks")); - ts.load(fis, tsPass); - tmf.init(ts); - tms = tmf.getTrustManagers(); - } - sc.init(kms, tms, new java.security.SecureRandom()); - HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); - } catch (Exception e) { - throw new IOException("Could not initialize SSLContext", e); - } finally { - if (fis != null) { - fis.close(); - } + connectionFactory.setConnConfigurator(URLConnectionFactory + .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, + conf)); + } catch (GeneralSecurityException e) { + throw new IOException(e); } } @@ -147,70 +80,4 @@ public class HsftpFileSystem extends HftpFileSystem { return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT); } - - @Override - protected HttpURLConnection openConnection(String path, String query) - throws IOException { - query = addDelegationTokenParam(query); - final URL url = new URL(getUnderlyingProtocol(), nnUri.getHost(), - nnUri.getPort(), path + '?' 
+ query); - HttpsURLConnection conn; - conn = (HttpsURLConnection)connectionFactory.openConnection(url); - // bypass hostname verification - conn.setHostnameVerifier(new DummyHostnameVerifier()); - conn.setRequestMethod("GET"); - conn.connect(); - - // check cert expiration date - final int warnDays = ExpWarnDays; - if (warnDays > 0) { // make sure only check once - ExpWarnDays = 0; - long expTimeThreshold = warnDays * MM_SECONDS_PER_DAY + Time.now(); - X509Certificate[] clientCerts = (X509Certificate[]) conn - .getLocalCertificates(); - if (clientCerts != null) { - for (X509Certificate cert : clientCerts) { - long expTime = cert.getNotAfter().getTime(); - if (expTime < expTimeThreshold) { - StringBuilder sb = new StringBuilder(); - sb.append("\n Client certificate " - + cert.getSubjectX500Principal().getName()); - int dayOffSet = (int) ((expTime - Time.now()) / MM_SECONDS_PER_DAY); - sb.append(" have " + dayOffSet + " days to expire"); - LOG.warn(sb.toString()); - } - } - } - } - return (HttpURLConnection) conn; - } - - /** - * Dummy hostname verifier that is used to bypass hostname checking - */ - protected static class DummyHostnameVerifier implements HostnameVerifier { - @Override - public boolean verify(String hostname, SSLSession session) { - return true; - } - } - - /** - * Dummy trustmanager that is used to trust all server certificates - */ - protected static class DummyTrustManager implements X509TrustManager { - @Override - public void checkClientTrusted(X509Certificate[] chain, String authType) { - } - - @Override - public void checkServerTrusted(X509Certificate[] chain, String authType) { - } - - @Override - public X509Certificate[] getAcceptedIssuers() { - return null; - } - } - } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java index f3fac077f16..3bf88f3e6ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java @@ -57,6 +57,7 @@ final class TokenAspect { @Override public boolean handleKind(Text kind) { return kind.equals(HftpFileSystem.TOKEN_KIND) + || kind.equals(HsftpFileSystem.TOKEN_KIND) || kind.equals(WebHdfsFileSystem.TOKEN_KIND); } @@ -75,8 +76,11 @@ final class TokenAspect { final InetSocketAddress address = SecurityUtil.getTokenServiceAddr(token); Text kind = token.getKind(); final URI uri; + if (kind.equals(HftpFileSystem.TOKEN_KIND)) { uri = DFSUtil.createUri(HftpFileSystem.SCHEME, address); + } else if (kind.equals(HsftpFileSystem.TOKEN_KIND)) { + uri = DFSUtil.createUri(HsftpFileSystem.SCHEME, address); } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) { uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java index 0ac7d651e95..9418c1aef4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java @@ -22,6 +22,11 @@ import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; import java.net.URLConnection; +import java.security.GeneralSecurityException; + +import 
javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLSocketFactory; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -32,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; +import org.apache.hadoop.security.ssl.SSLFactory; /** * Utilities for handling URLs @@ -64,6 +70,35 @@ public class URLConnectionFactory { } }; + /** + * Create a new ConnectionConfigurator for SSL connections + */ + static ConnectionConfigurator newSslConnConfigurator(final int timeout, + Configuration conf) throws IOException, GeneralSecurityException { + final SSLFactory factory; + final SSLSocketFactory sf; + final HostnameVerifier hv; + + factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf); + factory.init(); + sf = factory.createSSLSocketFactory(); + hv = factory.getHostnameVerifier(); + + return new ConnectionConfigurator() { + @Override + public HttpURLConnection configure(HttpURLConnection conn) + throws IOException { + if (conn instanceof HttpsURLConnection) { + HttpsURLConnection c = (HttpsURLConnection) conn; + c.setSSLSocketFactory(sf); + c.setHostnameVerifier(hv); + } + URLConnectionFactory.setTimeouts(conn, timeout); + return conn; + } + }; + } + public URLConnectionFactory(int socketTimeout) { this.socketTimeout = socketTimeout; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java deleted file mode 100644 index 572f1d4ab5e..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.junit.Test; - -public class TestNameNodeHttpServer { - - @Test - public void testSslConfiguration() throws IOException { - Configuration conf = new Configuration(); - conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true); - System.setProperty("jetty.ssl.password", "foo"); - System.setProperty("jetty.ssl.keypassword", "bar"); - - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) - .build(); - - cluster.shutdown(); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpDelegationToken.java index acb1faff55c..a6e441128d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpDelegationToken.java @@ -18,233 +18,77 @@ package org.apache.hadoop.hdfs.web; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; -import static org.junit.Assert.*; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; import java.io.IOException; -import java.net.ServerSocket; -import java.net.Socket; +import java.net.HttpURLConnection; import java.net.URI; -import java.security.PrivilegedExceptionAction; +import java.net.URISyntaxException; +import java.net.URL; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.web.HftpFileSystem; -import org.apache.hadoop.hdfs.web.HsftpFileSystem; import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.SecurityUtilTestHelper; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.junit.Assert; import org.junit.Test; +import org.mockito.Mockito; import org.mockito.internal.util.reflection.Whitebox; public class TestHftpDelegationToken { + /** + * Test whether HftpFileSystem maintain wire-compatibility for 0.20.203 when + * obtaining delegation token. See HDFS-5440 for more details. 
+ */ @Test - public void testHdfsDelegationToken() throws Exception { - SecurityUtilTestHelper.setTokenServiceUseIp(true); - - final Configuration conf = new Configuration(); - conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - UserGroupInformation.setConfiguration(conf); - UserGroupInformation user = - UserGroupInformation.createUserForTesting("oom", - new String[]{"memory"}); - Token token = new Token - (new byte[0], new byte[0], - DelegationTokenIdentifier.HDFS_DELEGATION_KIND, - new Text("127.0.0.1:8020")); - user.addToken(token); - Token token2 = new Token - (null, null, new Text("other token"), new Text("127.0.0.1:8021")); - user.addToken(token2); - assertEquals("wrong tokens in user", 2, user.getTokens().size()); - FileSystem fs = - user.doAs(new PrivilegedExceptionAction() { - @Override - public FileSystem run() throws Exception { - return FileSystem.get(new URI("hftp://localhost:50470/"), conf); - } - }); - assertSame("wrong kind of file system", HftpFileSystem.class, - fs.getClass()); - assertSame("wrong token", token, - Whitebox.getInternalState(fs, "renewToken")); - } - - @Test - public void testSelectHftpDelegationToken() throws Exception { - SecurityUtilTestHelper.setTokenServiceUseIp(true); - + public void testTokenCompatibilityFor203() throws IOException, + URISyntaxException, AuthenticationException { Configuration conf = new Configuration(); - conf.setClass("fs.hftp.impl", HftpFileSystem.class, FileSystem.class); + HftpFileSystem fs = new HftpFileSystem(); - int httpPort = 80; - int httpsPort = 443; - conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, httpPort); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, httpsPort); + Token token = new Token(new byte[0], new byte[0], + DelegationTokenIdentifier.HDFS_DELEGATION_KIND, new Text( + "127.0.0.1:8020")); + Credentials cred = new Credentials(); + cred.addToken(HftpFileSystem.TOKEN_KIND, token); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + cred.write(new DataOutputStream(os)); - // test with implicit default port - URI fsUri = URI.create("hftp://localhost"); - HftpFileSystem fs = (HftpFileSystem) FileSystem.newInstance(fsUri, conf); - assertEquals(httpPort, fs.getCanonicalUri().getPort()); - checkTokenSelection(fs, httpPort, conf); + HttpURLConnection conn = mock(HttpURLConnection.class); + doReturn(new ByteArrayInputStream(os.toByteArray())).when(conn) + .getInputStream(); + doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode(); - // test with explicit default port - // Make sure it uses the port from the hftp URI. - fsUri = URI.create("hftp://localhost:"+httpPort); - fs = (HftpFileSystem) FileSystem.newInstance(fsUri, conf); - assertEquals(httpPort, fs.getCanonicalUri().getPort()); - checkTokenSelection(fs, httpPort, conf); + URLConnectionFactory factory = mock(URLConnectionFactory.class); + doReturn(conn).when(factory).openConnection(Mockito. any(), + anyBoolean()); - // test with non-default port - // Make sure it uses the port from the hftp URI. 
- fsUri = URI.create("hftp://localhost:"+(httpPort+1)); - fs = (HftpFileSystem) FileSystem.newInstance(fsUri, conf); - assertEquals(httpPort+1, fs.getCanonicalUri().getPort()); - checkTokenSelection(fs, httpPort + 1, conf); + fs.initialize(new URI("hftp://127.0.0.1:8020"), conf); + fs.connectionFactory = factory; - conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5); - } + UserGroupInformation ugi = UserGroupInformation.createUserForTesting("foo", + new String[] { "bar" }); - @Test - public void testSelectHsftpDelegationToken() throws Exception { - SecurityUtilTestHelper.setTokenServiceUseIp(true); + TokenAspect tokenAspect = new TokenAspect( + fs, HftpFileSystem.TOKEN_KIND); - Configuration conf = new Configuration(); - conf.setClass("fs.hsftp.impl", HsftpFileSystem.class, FileSystem.class); + tokenAspect.initDelegationToken(ugi); + tokenAspect.ensureTokenInitialized(); - int httpPort = 80; - int httpsPort = 443; - conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, httpPort); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, httpsPort); + Assert.assertSame(HftpFileSystem.TOKEN_KIND, fs.getRenewToken().getKind()); - // test with implicit default port - URI fsUri = URI.create("hsftp://localhost"); - HsftpFileSystem fs = (HsftpFileSystem) FileSystem.newInstance(fsUri, conf); - assertEquals(httpsPort, fs.getCanonicalUri().getPort()); - checkTokenSelection(fs, httpsPort, conf); - - // test with explicit default port - fsUri = URI.create("hsftp://localhost:"+httpsPort); - fs = (HsftpFileSystem) FileSystem.newInstance(fsUri, conf); - assertEquals(httpsPort, fs.getCanonicalUri().getPort()); - checkTokenSelection(fs, httpsPort, conf); - - // test with non-default port - fsUri = URI.create("hsftp://localhost:"+(httpsPort+1)); - fs = (HsftpFileSystem) FileSystem.newInstance(fsUri, conf); - assertEquals(httpsPort+1, fs.getCanonicalUri().getPort()); - checkTokenSelection(fs, httpsPort+1, conf); - - conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5); - } - - - @Test - public void testInsecureRemoteCluster() throws Exception { - final ServerSocket socket = new ServerSocket(0); // just reserve a port - socket.close(); - Configuration conf = new Configuration(); - URI fsUri = URI.create("hsftp://localhost:"+socket.getLocalPort()); - assertNull(FileSystem.newInstance(fsUri, conf).getDelegationToken(null)); - } - - @Test - public void testSecureClusterError() throws Exception { - final ServerSocket socket = new ServerSocket(0); - Thread t = new Thread() { - @Override - public void run() { - while (true) { // fetching does a few retries - try { - Socket s = socket.accept(); - s.getOutputStream().write(1234); - s.shutdownOutput(); - } catch (Exception e) { - break; - } - } - } - }; - t.start(); - - try { - Configuration conf = new Configuration(); - URI fsUri = URI.create("hsftp://localhost:"+socket.getLocalPort()); - Exception ex = null; - try { - FileSystem.newInstance(fsUri, conf).getDelegationToken(null); - } catch (Exception e) { - ex = e; - } - assertNotNull(ex); - assertNotNull(ex.getCause()); - assertEquals("Remote host closed connection during handshake", - ex.getCause().getMessage()); - } finally { - t.interrupt(); - } - } - - private void checkTokenSelection(HftpFileSystem fs, - int port, - Configuration conf) throws IOException { - UserGroupInformation ugi = - UserGroupInformation.createUserForTesting(fs.getUri().getAuthority(), new String[]{}); - - @SuppressWarnings("unchecked") - TokenAspect aspect = (TokenAspect) Whitebox.getInternalState(fs, "tokenAspect"); - - // use 
ip-based tokens - SecurityUtilTestHelper.setTokenServiceUseIp(true); - - // test fallback to hdfs token - Token hdfsToken = new Token( - new byte[0], new byte[0], - DelegationTokenIdentifier.HDFS_DELEGATION_KIND, - new Text("127.0.0.1:8020")); - ugi.addToken(hdfsToken); - - // test fallback to hdfs token - Token token = aspect.selectDelegationToken(ugi); - assertNotNull(token); - assertEquals(hdfsToken, token); - - // test hftp is favored over hdfs - Token hftpToken = new Token( - new byte[0], new byte[0], - HftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:"+port)); - ugi.addToken(hftpToken); - token = aspect.selectDelegationToken(ugi); - assertNotNull(token); - assertEquals(hftpToken, token); - - // switch to using host-based tokens, no token should match - SecurityUtilTestHelper.setTokenServiceUseIp(false); - token = aspect.selectDelegationToken(ugi); - assertNull(token); - - // test fallback to hdfs token - hdfsToken = new Token( - new byte[0], new byte[0], - DelegationTokenIdentifier.HDFS_DELEGATION_KIND, - new Text("localhost:8020")); - ugi.addToken(hdfsToken); - token = aspect.selectDelegationToken(ugi); - assertNotNull(token); - assertEquals(hdfsToken, token); - - // test hftp is favored over hdfs - hftpToken = new Token( - new byte[0], new byte[0], - HftpFileSystem.TOKEN_KIND, new Text("localhost:"+port)); - ugi.addToken(hftpToken); - token = aspect.selectDelegationToken(ugi); - assertNotNull(token); - assertEquals(hftpToken, token); + Token tok = (Token) Whitebox.getInternalState(fs, "delegationToken"); + Assert.assertNotSame("Not making a copy of the remote token", token, tok); + Assert.assertEquals(token.getKind(), tok.getKind()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java index 1bd3c3974f7..06ac50a0c7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; @@ -29,23 +30,22 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.URLConnection; -import java.util.Random; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.util.ServletUtil; -import org.apache.log4j.Level; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -53,8 +53,10 @@ import org.junit.BeforeClass; import org.junit.Test; public class TestHftpFileSystem { - private static final 
Random RAN = new Random(); - + private static final String BASEDIR = System.getProperty("test.build.dir", + "target/test-dir") + "/" + TestHftpFileSystem.class.getSimpleName(); + private static String keystoresDir; + private static String sslConfDir; private static Configuration config = null; private static MiniDFSCluster cluster = null; private static String blockPoolId = null; @@ -83,25 +85,28 @@ public class TestHftpFileSystem { new Path("/foo\">bar/foo\">bar"), }; @BeforeClass - public static void setUp() throws IOException { - ((Log4JLogger) HftpFileSystem.LOG).getLogger().setLevel(Level.ALL); - - final long seed = RAN.nextLong(); - System.out.println("seed=" + seed); - RAN.setSeed(seed); - + public static void setUp() throws Exception { config = new Configuration(); cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build(); blockPoolId = cluster.getNamesystem().getBlockPoolId(); hftpUri = "hftp://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); + File base = new File(BASEDIR); + FileUtil.fullyDelete(base); + base.mkdirs(); + keystoresDir = new File(BASEDIR).getAbsolutePath(); + sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHftpFileSystem.class); + + KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, config, false); } @AfterClass - public static void tearDown() throws IOException { + public static void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); } + FileUtil.fullyDelete(new File(BASEDIR)); + KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); } @Before @@ -352,9 +357,12 @@ public class TestHftpFileSystem { Configuration conf = new Configuration(); URI uri = URI.create("hftp://localhost"); HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf); - URLConnection conn = fs.connectionFactory.openConnection(new URL("http://localhost")); - assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, conn.getConnectTimeout()); - assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, conn.getReadTimeout()); + URLConnection conn = fs.connectionFactory.openConnection(new URL( + "http://localhost")); + assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, + conn.getConnectTimeout()); + assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, + conn.getReadTimeout()); } // / diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java new file mode 100644 index 00000000000..e1a7f18b9a2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.web; + +import java.io.File; +import java.net.InetSocketAddress; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestHttpsFileSystem { + private static final String BASEDIR = System.getProperty("test.build.dir", + "target/test-dir") + "/" + TestHttpsFileSystem.class.getSimpleName(); + + private static MiniDFSCluster cluster; + private static Configuration conf; + + private static String keystoresDir; + private static String sslConfDir; + private static String nnAddr; + + @BeforeClass + public static void setUp() throws Exception { + conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true); + conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true); + conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); + + File base = new File(BASEDIR); + FileUtil.fullyDelete(base); + base.mkdirs(); + keystoresDir = new File(BASEDIR).getAbsolutePath(); + sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHttpsFileSystem.class); + + KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + cluster.getFileSystem().create(new Path("/test")).close(); + InetSocketAddress addr = cluster.getNameNode().getHttpsAddress(); + nnAddr = addr.getHostName() + ":" + addr.getPort(); + } + + @AfterClass + public static void tearDown() throws Exception { + cluster.shutdown(); + FileUtil.fullyDelete(new File(BASEDIR)); + KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); + } + + @Test + public void testHsftpFileSystem() throws Exception { + FileSystem fs = FileSystem.get(new URI("hsftp://" + nnAddr), conf); + Assert.assertTrue(fs.exists(new Path("/test"))); + fs.close(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml deleted file mode 100644 index 98910049ab3..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - ssl.client.do.not.authenticate.server - true - -