HDFS-5502. Fix HTTPS support in HsftpFileSystem. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1542438 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-11-16 01:00:42 +00:00
parent a2200a6417
commit fef8f49c58
14 changed files with 252 additions and 438 deletions

View File

@@ -85,6 +85,7 @@ import org.mortbay.jetty.webapp.WebAppContext;
import org.mortbay.thread.QueuedThreadPool;
import org.mortbay.util.MultiException;
import com.google.common.base.Preconditions;
import com.sun.jersey.spi.container.servlet.ServletContainer;
/**
@@ -716,6 +717,19 @@ public class HttpServer implements FilterContainer {
return webServer.getConnectors()[0].getLocalPort();
}
/**
* Get the port that corresponds to a particular connector. In the case of
* HDFS, the second connector corresponds to the HTTPS connector.
*
* @param index the index of the connector
* @return the corresponding port for the connector, or -1 if there's no such
* connector.
*/
public int getConnectorPort(int index) {
Preconditions.checkArgument(index >= 0);
return index < webServer.getConnectors().length ?
webServer.getConnectors()[index].getLocalPort() : -1;
}
/**
* Set the min, max number of worker threads (simultaneous connections).
*/
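
A minimal usage sketch (not part of the patch) for the getConnectorPort() accessor above; with HTTP on connector 0 and HTTPS on connector 1, the bound ports can be read back after start():

int httpPort = httpServer.getConnectorPort(0); // same value as getPort()
int httpsPort = httpServer.getConnectorPort(1); // -1 if no HTTPS connector is configured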

View File

@@ -564,6 +564,8 @@ Release 2.3.0 - UNRELEASED
HDFS-5438. Flaws in block report processing can cause data loss. (kihwal)
HDFS-5502. Fix HTTPS support in HsftpFileSystem. (Haohui Mai via jing9)
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@@ -796,6 +796,10 @@ public class NameNode implements NameNodeStatusMXBean {
return httpServer.getHttpAddress();
}
public InetSocketAddress getHttpsAddress() {
return httpServer.getHttpsAddress();
}
/**
* Verify that configured directories exist, then
* Interactively confirm that formatting is desired

View File

@@ -119,7 +119,12 @@ public class NameNodeHttpServer {
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
setupServlets(httpServer, conf);
httpServer.start();
httpAddress = new InetSocketAddress(bindAddress.getAddress(),
httpServer.getPort());
if (certSSL) {
httpsAddress = new InetSocketAddress(bindAddress.getAddress(),
httpServer.getConnectorPort(1));
}
}
private Map<String, String> getAuthFilterParams(Configuration conf)

View File

@@ -221,6 +221,8 @@ public class DelegationTokenFetcher {
.append(renewer);
}
boolean isHttps = nnUri.getScheme().equals("https");
HttpURLConnection conn = null;
DataInputStream dis = null;
InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnUri
@@ -237,7 +239,7 @@ public class DelegationTokenFetcher {
dis = new DataInputStream(in);
ts.readFields(dis);
for (Token<?> token : ts.getAllTokens()) {
token.setKind(isHttps ? HsftpFileSystem.TOKEN_KIND : HftpFileSystem.TOKEN_KIND);
SecurityUtil.setTokenService(token, serviceAddr);
}
return ts;
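
The kind recorded above is what TokenAspect later keys on to rebuild the correct URI for token renewal. A hedged sketch of the selection, with a hypothetical address (the kinds are the ones defined in this patch):

URI nnUri = URI.create("https://127.0.0.1:50470"); // hypothetical
boolean isHttps = nnUri.getScheme().equals("https");
Text kind = isHttps ? HsftpFileSystem.TOKEN_KIND // "HSFTP delegation"
    : HftpFileSystem.TOKEN_KIND; // "HFTP delegation"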

View File

@@ -86,7 +86,7 @@ public class HftpFileSystem extends FileSystem
HttpURLConnection.setFollowRedirects(true);
}
URLConnectionFactory connectionFactory;
public static final Text TOKEN_KIND = new Text("HFTP delegation");
@@ -98,7 +98,7 @@ public class HftpFileSystem extends FileSystem
public static final String HFTP_TIMEZONE = "UTC";
public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
protected TokenAspect<HftpFileSystem> tokenAspect;
private Token<?> delegationToken;
private Token<?> renewToken;
@@ -172,6 +172,16 @@ public class HftpFileSystem extends FileSystem
return SCHEME;
}
/**
* Initialize connectionFactory and tokenAspect. This function is intended to
* be overridden by HsftpFileSystem.
*/
protected void initConnectionFactoryAndTokenAspect(Configuration conf)
throws IOException {
tokenAspect = new TokenAspect<HftpFileSystem>(this, TOKEN_KIND);
connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
}
@Override
public void initialize(final URI name, final Configuration conf)
throws IOException {
@@ -179,6 +189,7 @@ public class HftpFileSystem extends FileSystem
setConf(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.nnUri = getNamenodeUri(name);
try {
this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
null, null, null);
@@ -186,6 +197,7 @@ public class HftpFileSystem extends FileSystem
throw new IllegalArgumentException(e);
}
initConnectionFactoryAndTokenAspect(conf);
if (UserGroupInformation.isSecurityEnabled()) {
tokenAspect.initDelegationToken(ugi);
}
@@ -212,8 +224,8 @@ public class HftpFileSystem extends FileSystem
*
* For other operations, however, the client has to send a
* HDFS_DELEGATION_KIND token over the wire so that it can talk to Hadoop
* 0.20.203 clusters. Later releases fix this problem. See HDFS-5440 for
* more details.
*/
renewToken = token;
delegationToken = new Token<T>(token);
@@ -229,13 +241,12 @@ public class HftpFileSystem extends FileSystem
return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
@Override
public Token<?> run() throws IOException {
Credentials c;
try {
c = DelegationTokenFetcher.getDTfromRemote(connectionFactory, nnUri, renewer);
} catch (IOException e) {
if (e.getCause() instanceof ConnectException) {
LOG.warn("Couldn't connect to " + nnUri +
", assuming security is disabled");
return null;
}
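
The initConnectionFactoryAndTokenAspect() hook above is what lets HsftpFileSystem (below) swap in an SSL-aware connection factory and its own token kind while reusing the rest of this class. A minimal client-side sketch, with hypothetical host and ports:

Configuration conf = new Configuration();
FileSystem overHttp = FileSystem.get(URI.create("hftp://nn.example.com:50070"), conf);
FileSystem overHttps = FileSystem.get(URI.create("hsftp://nn.example.com:50470"), conf);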

View File

@@ -18,31 +18,14 @@
package org.apache.hadoop.hdfs.web;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.security.KeyStore;
import java.security.cert.X509Certificate;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.KeyManager;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSession;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;
import java.security.GeneralSecurityException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.io.Text;
/**
* An implementation of a protocol for accessing filesystems over HTTPS. The
@@ -55,9 +38,8 @@ import org.apache.hadoop.util.Time;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HsftpFileSystem extends HftpFileSystem {
private static final long MM_SECONDS_PER_DAY = 1000 * 60 * 60 * 24;
private volatile int ExpWarnDays = 0;
public static final Text TOKEN_KIND = new Text("HSFTP delegation");
public static final String SCHEME = "hsftp";
/**
* Return the protocol scheme for the FileSystem.
@@ -67,7 +49,7 @@ public class HsftpFileSystem extends HftpFileSystem {
*/
@Override
public String getScheme() {
return SCHEME;
}
/**
@@ -79,66 +61,17 @@ public class HsftpFileSystem extends HftpFileSystem {
}
@Override
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
setupSsl(conf);
ExpWarnDays = conf.getInt("ssl.expiration.warn.days", 30);
}
protected void initConnectionFactoryAndTokenAspect(Configuration conf) throws IOException {
tokenAspect = new TokenAspect<HftpFileSystem>(this, TOKEN_KIND);
/**
* Set up SSL resources
*
* @throws IOException
*/
private static void setupSsl(Configuration conf) throws IOException {
Configuration sslConf = new HdfsConfiguration(false);
sslConf.addResource(conf.get(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
FileInputStream fis = null;
connectionFactory = new URLConnectionFactory(
URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
try {
SSLContext sc = SSLContext.getInstance("SSL");
KeyManager[] kms = null;
TrustManager[] tms = null;
if (sslConf.get("ssl.client.keystore.location") != null) {
// initialize default key manager with keystore file and pass
KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
KeyStore ks = KeyStore.getInstance(sslConf.get(
"ssl.client.keystore.type", "JKS"));
char[] ksPass = sslConf.get("ssl.client.keystore.password", "changeit")
.toCharArray();
fis = new FileInputStream(sslConf.get("ssl.client.keystore.location",
"keystore.jks"));
ks.load(fis, ksPass);
kmf.init(ks, sslConf.get("ssl.client.keystore.keypassword", "changeit")
.toCharArray());
kms = kmf.getKeyManagers();
fis.close();
fis = null;
}
// initialize default trust manager with truststore file and pass
if (sslConf.getBoolean("ssl.client.do.not.authenticate.server", false)) {
// by pass trustmanager validation
tms = new DummyTrustManager[] { new DummyTrustManager() };
} else {
TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX");
KeyStore ts = KeyStore.getInstance(sslConf.get(
"ssl.client.truststore.type", "JKS"));
char[] tsPass = sslConf.get("ssl.client.truststore.password",
"changeit").toCharArray();
fis = new FileInputStream(sslConf.get("ssl.client.truststore.location",
"truststore.jks"));
ts.load(fis, tsPass);
tmf.init(ts);
tms = tmf.getTrustManagers();
}
sc.init(kms, tms, new java.security.SecureRandom());
HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
} catch (Exception e) {
throw new IOException("Could not initialize SSLContext", e);
} finally {
if (fis != null) {
fis.close();
}
connectionFactory.setConnConfigurator(URLConnectionFactory
.newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conf));
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}
@@ -147,70 +80,4 @@ public class HsftpFileSystem extends HftpFileSystem {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
}
@Override
protected HttpURLConnection openConnection(String path, String query)
throws IOException {
query = addDelegationTokenParam(query);
final URL url = new URL(getUnderlyingProtocol(), nnUri.getHost(),
nnUri.getPort(), path + '?' + query);
HttpsURLConnection conn;
conn = (HttpsURLConnection)connectionFactory.openConnection(url);
// bypass hostname verification
conn.setHostnameVerifier(new DummyHostnameVerifier());
conn.setRequestMethod("GET");
conn.connect();
// check cert expiration date
final int warnDays = ExpWarnDays;
if (warnDays > 0) { // make sure only check once
ExpWarnDays = 0;
long expTimeThreshold = warnDays * MM_SECONDS_PER_DAY + Time.now();
X509Certificate[] clientCerts = (X509Certificate[]) conn
.getLocalCertificates();
if (clientCerts != null) {
for (X509Certificate cert : clientCerts) {
long expTime = cert.getNotAfter().getTime();
if (expTime < expTimeThreshold) {
StringBuilder sb = new StringBuilder();
sb.append("\n Client certificate "
+ cert.getSubjectX500Principal().getName());
int dayOffSet = (int) ((expTime - Time.now()) / MM_SECONDS_PER_DAY);
sb.append(" have " + dayOffSet + " days to expire");
LOG.warn(sb.toString());
}
}
}
}
return (HttpURLConnection) conn;
}
/**
* Dummy hostname verifier that is used to bypass hostname checking
*/
protected static class DummyHostnameVerifier implements HostnameVerifier {
@Override
public boolean verify(String hostname, SSLSession session) {
return true;
}
}
/**
* Dummy trustmanager that is used to trust all server certificates
*/
protected static class DummyTrustManager implements X509TrustManager {
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType) {
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType) {
}
@Override
public X509Certificate[] getAcceptedIssuers() {
return null;
}
}
}
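
With DummyHostnameVerifier and DummyTrustManager gone, hsftp validates server certificates like any other HTTPS client: SSLFactory reads the trust material from ssl-client.xml on the classpath. A hedged sketch of a client relying on that setup (host hypothetical; property names are the standard SSLFactory ones):

// ssl-client.xml is expected to define ssl.client.truststore.location etc.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("hsftp://nn.example.com:50470"), conf);
FileStatus st = fs.getFileStatus(new Path("/")); // request runs over validated TLS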

View File

@@ -57,6 +57,7 @@ final class TokenAspect<T extends FileSystem & Renewable> {
@Override
public boolean handleKind(Text kind) {
return kind.equals(HftpFileSystem.TOKEN_KIND)
|| kind.equals(HsftpFileSystem.TOKEN_KIND)
|| kind.equals(WebHdfsFileSystem.TOKEN_KIND);
}
@@ -75,8 +76,11 @@ final class TokenAspect<T extends FileSystem & Renewable> {
final InetSocketAddress address = SecurityUtil.getTokenServiceAddr(token);
Text kind = token.getKind();
final URI uri;
if (kind.equals(HftpFileSystem.TOKEN_KIND)) {
uri = DFSUtil.createUri(HftpFileSystem.SCHEME, address);
} else if (kind.equals(HsftpFileSystem.TOKEN_KIND)) {
uri = DFSUtil.createUri(HsftpFileSystem.SCHEME, address);
} else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address);
} else {
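
A hedged sketch of the mapping above, with a hypothetical service address; the token kind alone decides which scheme the renewer reconstructs:

Token<TokenIdentifier> t = new Token<TokenIdentifier>(new byte[0], new byte[0],
    HsftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:50470"));
URI u = DFSUtil.createUri(HsftpFileSystem.SCHEME,
    SecurityUtil.getTokenServiceAddr(t)); // hsftp://127.0.0.1:50470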

View File

@@ -22,6 +22,11 @@ import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.security.GeneralSecurityException;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLSocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -32,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.ssl.SSLFactory;
/**
* Utilities for handling URLs
@@ -64,6 +70,35 @@ public class URLConnectionFactory {
}
};
/**
* Create a new ConnectionConfigurator for SSL connections
*/
static ConnectionConfigurator newSslConnConfigurator(final int timeout,
Configuration conf) throws IOException, GeneralSecurityException {
final SSLFactory factory;
final SSLSocketFactory sf;
final HostnameVerifier hv;
factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
factory.init();
sf = factory.createSSLSocketFactory();
hv = factory.getHostnameVerifier();
return new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
if (conn instanceof HttpsURLConnection) {
HttpsURLConnection c = (HttpsURLConnection) conn;
c.setSSLSocketFactory(sf);
c.setHostnameVerifier(hv);
}
URLConnectionFactory.setTimeouts(conn, timeout);
return conn;
}
};
}
public URLConnectionFactory(int socketTimeout) {
this.socketTimeout = socketTimeout;
}
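
A hedged usage sketch of the configurator above, from within this package as HsftpFileSystem does it (NameNode address hypothetical, checked exceptions elided): HTTPS connections get the cluster's socket factory and hostname verifier, while plain HTTP connections only pick up the timeouts.

URLConnectionFactory factory = new URLConnectionFactory(
    URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
factory.setConnConfigurator(URLConnectionFactory.newSslConnConfigurator(
    URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, conf));
HttpURLConnection conn = (HttpURLConnection) factory.openConnection(
    new URL("https://nn.example.com:50470/"));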

View File

@@ -1,39 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestNameNodeHttpServer {
@Test
public void testSslConfiguration() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
System.setProperty("jetty.ssl.password", "foo");
System.setProperty("jetty.ssl.keypassword", "bar");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.build();
cluster.shutdown();
}
}

View File

@@ -18,233 +18,77 @@
package org.apache.hadoop.hdfs.web;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.junit.Assert.*;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.HttpURLConnection;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.net.URISyntaxException;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.hdfs.web.HsftpFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtilTestHelper;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
public class TestHftpDelegationToken {
/**
* Test whether HftpFileSystem maintains wire-compatibility for 0.20.203 when
* obtaining delegation token. See HDFS-5440 for more details.
*/
@Test
public void testHdfsDelegationToken() throws Exception {
SecurityUtilTestHelper.setTokenServiceUseIp(true);
final Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
UserGroupInformation user =
UserGroupInformation.createUserForTesting("oom",
new String[]{"memory"});
Token<?> token = new Token<TokenIdentifier>
(new byte[0], new byte[0],
DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
new Text("127.0.0.1:8020"));
user.addToken(token);
Token<?> token2 = new Token<TokenIdentifier>
(null, null, new Text("other token"), new Text("127.0.0.1:8021"));
user.addToken(token2);
assertEquals("wrong tokens in user", 2, user.getTokens().size());
FileSystem fs =
user.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI("hftp://localhost:50470/"), conf);
}
});
assertSame("wrong kind of file system", HftpFileSystem.class,
fs.getClass());
assertSame("wrong token", token,
Whitebox.getInternalState(fs, "renewToken"));
}
@Test
public void testSelectHftpDelegationToken() throws Exception {
SecurityUtilTestHelper.setTokenServiceUseIp(true);
public void testTokenCompatibilityFor203() throws IOException,
URISyntaxException, AuthenticationException {
Configuration conf = new Configuration();
conf.setClass("fs.hftp.impl", HftpFileSystem.class, FileSystem.class);
HftpFileSystem fs = new HftpFileSystem();
int httpPort = 80;
int httpsPort = 443;
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, httpPort);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, httpsPort);
Token<?> token = new Token<TokenIdentifier>(new byte[0], new byte[0],
DelegationTokenIdentifier.HDFS_DELEGATION_KIND, new Text(
"127.0.0.1:8020"));
Credentials cred = new Credentials();
cred.addToken(HftpFileSystem.TOKEN_KIND, token);
ByteArrayOutputStream os = new ByteArrayOutputStream();
cred.write(new DataOutputStream(os));
// test with implicit default port
URI fsUri = URI.create("hftp://localhost");
HftpFileSystem fs = (HftpFileSystem) FileSystem.newInstance(fsUri, conf);
assertEquals(httpPort, fs.getCanonicalUri().getPort());
checkTokenSelection(fs, httpPort, conf);
HttpURLConnection conn = mock(HttpURLConnection.class);
doReturn(new ByteArrayInputStream(os.toByteArray())).when(conn)
.getInputStream();
doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
// test with explicit default port
// Make sure it uses the port from the hftp URI.
fsUri = URI.create("hftp://localhost:"+httpPort);
fs = (HftpFileSystem) FileSystem.newInstance(fsUri, conf);
assertEquals(httpPort, fs.getCanonicalUri().getPort());
checkTokenSelection(fs, httpPort, conf);
URLConnectionFactory factory = mock(URLConnectionFactory.class);
doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
anyBoolean());
// test with non-default port
// Make sure it uses the port from the hftp URI.
fsUri = URI.create("hftp://localhost:"+(httpPort+1));
fs = (HftpFileSystem) FileSystem.newInstance(fsUri, conf);
assertEquals(httpPort+1, fs.getCanonicalUri().getPort());
checkTokenSelection(fs, httpPort + 1, conf);
fs.initialize(new URI("hftp://127.0.0.1:8020"), conf);
fs.connectionFactory = factory;
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5);
}
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("foo",
new String[] { "bar" });
@Test
public void testSelectHsftpDelegationToken() throws Exception {
SecurityUtilTestHelper.setTokenServiceUseIp(true);
TokenAspect<HftpFileSystem> tokenAspect = new TokenAspect<HftpFileSystem>(
fs, HftpFileSystem.TOKEN_KIND);
Configuration conf = new Configuration();
conf.setClass("fs.hsftp.impl", HsftpFileSystem.class, FileSystem.class);
tokenAspect.initDelegationToken(ugi);
tokenAspect.ensureTokenInitialized();
int httpPort = 80;
int httpsPort = 443;
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, httpPort);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, httpsPort);
Assert.assertSame(HftpFileSystem.TOKEN_KIND, fs.getRenewToken().getKind());
// test with implicit default port
URI fsUri = URI.create("hsftp://localhost");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.newInstance(fsUri, conf);
assertEquals(httpsPort, fs.getCanonicalUri().getPort());
checkTokenSelection(fs, httpsPort, conf);
// test with explicit default port
fsUri = URI.create("hsftp://localhost:"+httpsPort);
fs = (HsftpFileSystem) FileSystem.newInstance(fsUri, conf);
assertEquals(httpsPort, fs.getCanonicalUri().getPort());
checkTokenSelection(fs, httpsPort, conf);
// test with non-default port
fsUri = URI.create("hsftp://localhost:"+(httpsPort+1));
fs = (HsftpFileSystem) FileSystem.newInstance(fsUri, conf);
assertEquals(httpsPort+1, fs.getCanonicalUri().getPort());
checkTokenSelection(fs, httpsPort+1, conf);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5);
}
@Test
public void testInsecureRemoteCluster() throws Exception {
final ServerSocket socket = new ServerSocket(0); // just reserve a port
socket.close();
Configuration conf = new Configuration();
URI fsUri = URI.create("hsftp://localhost:"+socket.getLocalPort());
assertNull(FileSystem.newInstance(fsUri, conf).getDelegationToken(null));
}
@Test
public void testSecureClusterError() throws Exception {
final ServerSocket socket = new ServerSocket(0);
Thread t = new Thread() {
@Override
public void run() {
while (true) { // fetching does a few retries
try {
Socket s = socket.accept();
s.getOutputStream().write(1234);
s.shutdownOutput();
} catch (Exception e) {
break;
}
}
}
};
t.start();
try {
Configuration conf = new Configuration();
URI fsUri = URI.create("hsftp://localhost:"+socket.getLocalPort());
Exception ex = null;
try {
FileSystem.newInstance(fsUri, conf).getDelegationToken(null);
} catch (Exception e) {
ex = e;
}
assertNotNull(ex);
assertNotNull(ex.getCause());
assertEquals("Remote host closed connection during handshake",
ex.getCause().getMessage());
} finally {
t.interrupt();
}
}
private void checkTokenSelection(HftpFileSystem fs,
int port,
Configuration conf) throws IOException {
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting(fs.getUri().getAuthority(), new String[]{});
@SuppressWarnings("unchecked")
TokenAspect<HftpFileSystem> aspect = (TokenAspect<HftpFileSystem>) Whitebox.getInternalState(fs, "tokenAspect");
// use ip-based tokens
SecurityUtilTestHelper.setTokenServiceUseIp(true);
// test fallback to hdfs token
Token<?> hdfsToken = new Token<TokenIdentifier>(
new byte[0], new byte[0],
DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
new Text("127.0.0.1:8020"));
ugi.addToken(hdfsToken);
// test fallback to hdfs token
Token<?> token = aspect.selectDelegationToken(ugi);
assertNotNull(token);
assertEquals(hdfsToken, token);
// test hftp is favored over hdfs
Token<?> hftpToken = new Token<TokenIdentifier>(
new byte[0], new byte[0],
HftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:"+port));
ugi.addToken(hftpToken);
token = aspect.selectDelegationToken(ugi);
assertNotNull(token);
assertEquals(hftpToken, token);
// switch to using host-based tokens, no token should match
SecurityUtilTestHelper.setTokenServiceUseIp(false);
token = aspect.selectDelegationToken(ugi);
assertNull(token);
// test fallback to hdfs token
hdfsToken = new Token<TokenIdentifier>(
new byte[0], new byte[0],
DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
new Text("localhost:8020"));
ugi.addToken(hdfsToken);
token = aspect.selectDelegationToken(ugi);
assertNotNull(token);
assertEquals(hdfsToken, token);
// test hftp is favored over hdfs
hftpToken = new Token<TokenIdentifier>(
new byte[0], new byte[0],
HftpFileSystem.TOKEN_KIND, new Text("localhost:"+port));
ugi.addToken(hftpToken);
token = aspect.selectDelegationToken(ugi);
assertNotNull(token);
assertEquals(hftpToken, token);
Token<?> tok = (Token<?>) Whitebox.getInternalState(fs, "delegationToken");
Assert.assertNotSame("Not making a copy of the remote token", token, tok);
Assert.assertEquals(token.getKind(), tok.getKind());
}
}

View File

@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
@@ -29,23 +30,22 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLConnection;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.util.ServletUtil;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@@ -53,8 +53,10 @@ import org.junit.BeforeClass;
import org.junit.Test;
public class TestHftpFileSystem {
private static final Random RAN = new Random();
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestHftpFileSystem.class.getSimpleName();
private static String keystoresDir;
private static String sslConfDir;
private static Configuration config = null;
private static MiniDFSCluster cluster = null;
private static String blockPoolId = null;
@@ -83,25 +85,28 @@
new Path("/foo\">bar/foo\">bar"), };
@BeforeClass
public static void setUp() throws IOException {
((Log4JLogger) HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
final long seed = RAN.nextLong();
System.out.println("seed=" + seed);
RAN.setSeed(seed);
public static void setUp() throws Exception {
config = new Configuration();
cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
blockPoolId = cluster.getNamesystem().getBlockPoolId();
hftpUri = "hftp://"
+ config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHftpFileSystem.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, config, false);
}
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
@Before
@@ -352,9 +357,12 @@ public class TestHftpFileSystem {
Configuration conf = new Configuration();
URI uri = URI.create("hftp://localhost");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
URLConnection conn = fs.connectionFactory.openConnection(new URL(
"http://localhost"));
assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conn.getConnectTimeout());
assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conn.getReadTimeout());
}
// /

View File

@@ -0,0 +1,83 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.File;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestHttpsFileSystem {
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestHttpsFileSystem.class.getSimpleName();
private static MiniDFSCluster cluster;
private static Configuration conf;
private static String keystoresDir;
private static String sslConfDir;
private static String nnAddr;
@BeforeClass
public static void setUp() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHttpsFileSystem.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
cluster.getFileSystem().create(new Path("/test")).close();
InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
nnAddr = addr.getHostName() + ":" + addr.getPort();
}
@AfterClass
public static void tearDown() throws Exception {
cluster.shutdown();
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
@Test
public void testHsftpFileSystem() throws Exception {
FileSystem fs = FileSystem.get(new URI("hsftp://" + nnAddr), conf);
Assert.assertTrue(fs.exists(new Path("/test")));
fs.close();
}
}

View File

@@ -1,26 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!-- Turn off SSL server authentication for tests by default -->
<property>
<name>ssl.client.do.not.authenticate.server</name>
<value>true</value>
</property>
</configuration>