Merged r1239763 from trunk for HDFS-2784.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1239797 13f79535-47bb-0310-9956-ffa450edef68
Jitendra Nath Pandey 2012-02-02 19:56:13 +00:00
parent 1884fe4648
commit 5796391773
11 changed files with 297 additions and 166 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
View File

@@ -101,6 +101,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2814. NamenodeMXBean does not account for svn revision in the version
information. (Hitesh Shah via jitendra)
HDFS-2784. Update hftp and hdfs for host-based token support.
(Kihwal Lee via jitendra)
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)
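The unifying idea of the patch, stated once for orientation: delegation-token service names stop being hand-assembled "ip:port" strings and are instead stamped and resolved through SecurityUtil, which also understands host-based services. A minimal sketch of the pattern, using only the SecurityUtil/NetUtils calls that appear in the hunks below; the host name and port are illustrative:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    class TokenServiceSketch {
      // Stamp a token's client-side service field from a resolved address,
      // then recover the address from the token. setTokenService() honors
      // hadoop.security.token.service.use_ip, so the stored service may be
      // "host:port" or "ip:port" without the caller caring which.
      static <T extends TokenIdentifier>
          InetSocketAddress stampAndResolve(Token<T> token) {
        InetSocketAddress addr =
            NetUtils.createSocketAddr("nn.example.com", 50470); // illustrative
        SecurityUtil.setTokenService(token, addr);
        return SecurityUtil.getTokenServiceAddr(token);
      }
    }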

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
View File

@@ -613,7 +613,7 @@ public class DFSClient implements java.io.Closeable {
DelegationTokenIdentifier.stringifyToken(delToken));
ClientProtocol nn =
DFSUtil.createRPCNamenode
(NameNode.getAddress(token.getService().toString()),
(SecurityUtil.getTokenServiceAddr(delToken),
conf, UserGroupInformation.getCurrentUser());
try {
return nn.renewDelegationToken(delToken);
@@ -631,7 +631,7 @@ public class DFSClient implements java.io.Closeable {
LOG.info("Cancelling " +
DelegationTokenIdentifier.stringifyToken(delToken));
ClientProtocol nn = DFSUtil.createRPCNamenode(
NameNode.getAddress(token.getService().toString()), conf,
SecurityUtil.getTokenServiceAddr(delToken), conf,
UserGroupInformation.getCurrentUser());
try {
nn.cancelDelegationToken(delToken);
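Both hunks make the same move: the namenode address for renew/cancel is taken from the token itself instead of re-parsing token.getService() as a string, which breaks once services are host-based. A condensed sketch; the wrapper class is illustrative:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.token.Token;

    class RenewAddrSketch {
      static InetSocketAddress nnAddrFor(Token<?> delToken) {
        // before: NameNode.getAddress(delToken.getService().toString()),
        // a string round-trip that assumed an "ip:port" service
        // after: one call that copes with host-based services too
        return SecurityUtil.getTokenServiceAddr(delToken);
      }
    }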

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
View File

@@ -108,45 +108,10 @@ public class DistributedFileSystem extends FileSystem {
InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
this.dfs = new DFSClient(namenode, conf, statistics);
this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
this.workingDir = getHomeDirectory();
}
/** Permit paths which explicitly specify the default port. */
@Override
protected void checkPath(Path path) {
URI thisUri = this.getUri();
URI thatUri = path.toUri();
String thatAuthority = thatUri.getAuthority();
if (thatUri.getScheme() != null
&& thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
&& thatUri.getPort() == NameNode.DEFAULT_PORT
&& (thisUri.getPort() == -1 ||
thisUri.getPort() == NameNode.DEFAULT_PORT)
&& thatAuthority.substring(0,thatAuthority.indexOf(":"))
.equalsIgnoreCase(thisUri.getAuthority()))
return;
super.checkPath(path);
}
/** Normalize paths that explicitly specify the default port. */
@Override
public Path makeQualified(Path path) {
URI thisUri = this.getUri();
URI thatUri = path.toUri();
String thatAuthority = thatUri.getAuthority();
if (thatUri.getScheme() != null
&& thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
&& thatUri.getPort() == NameNode.DEFAULT_PORT
&& thisUri.getPort() == -1
&& thatAuthority.substring(0,thatAuthority.indexOf(":"))
.equalsIgnoreCase(thisUri.getAuthority())) {
path = new Path(thisUri.getScheme(), thisUri.getAuthority(),
thatUri.getPath());
}
return super.makeQualified(path);
}
@Override
public Path getWorkingDirectory() {
return workingDir;
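Net effect of this hunk: the filesystem URI keeps the caller's scheme and authority verbatim, and the default-port special cases in checkPath()/makeQualified() are dropped, presumably because equivalent handling now lives in the shared FileSystem code. A tiny illustration of the retained URI construction, with a made-up host:

    import java.net.URI;

    class UriSketch {
      public static void main(String[] args) {
        URI supplied = URI.create("hdfs://nn.example.com:8020/user/alice");
        // what initialize() now stores: the supplied scheme and authority
        URI fsUri = URI.create(supplied.getScheme() + "://"
            + supplied.getAuthority());
        System.out.println(fsUri); // prints hdfs://nn.example.com:8020
      }
    }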

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
View File

@@ -59,6 +59,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ServletUtil;
import org.xml.sax.Attributes;
@@ -89,17 +90,20 @@ public class HftpFileSystem extends FileSystem
public static final Text TOKEN_KIND = new Text("HFTP delegation");
private String nnHttpUrl;
private Text hdfsServiceName;
protected UserGroupInformation ugi;
private URI hftpURI;
protected InetSocketAddress nnAddr;
protected UserGroupInformation ugi;
protected InetSocketAddress nnSecureAddr;
public static final String HFTP_TIMEZONE = "UTC";
public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
private Token<?> delegationToken;
private Token<?> renewToken;
private static final HftpDelegationTokenSelector hftpTokenSelector =
new HftpDelegationTokenSelector();
public static final SimpleDateFormat getDateFormat() {
final SimpleDateFormat df = new SimpleDateFormat(HFTP_DATE_FORMAT);
df.setTimeZone(TimeZone.getTimeZone(HFTP_TIMEZONE));
@@ -115,11 +119,8 @@ public class HftpFileSystem extends FileSystem
@Override
protected int getDefaultPort() {
return getDefaultSecurePort();
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
}
protected int getDefaultSecurePort() {
@@ -127,16 +128,22 @@ public class HftpFileSystem extends FileSystem
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
}
@Override
public String getCanonicalServiceName() {
return SecurityUtil.buildDTServiceName(hftpURI, getDefaultPort());
}
private String buildUri(String schema, String host, int port) {
StringBuilder sb = new StringBuilder(schema);
return sb.append(host).append(":").append(port).toString();
}
protected InetSocketAddress getNamenodeAddr(URI uri) {
// use authority so user supplied uri can override port
return NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
}
protected InetSocketAddress getNamenodeSecureAddr(URI uri) {
// must only use the host and the configured https port
return NetUtils.createSocketAddrForHost(uri.getHost(), getDefaultSecurePort());
}
@Override
public String getCanonicalServiceName() {
// unlike other filesystems, hftp's service is the secure port, not the
// actual port in the uri
return SecurityUtil.buildTokenService(nnSecureAddr).toString();
}
@Override
public void initialize(final URI name, final Configuration conf)
@@ -144,95 +151,51 @@ public class HftpFileSystem extends FileSystem
super.initialize(name, conf);
setConf(conf);
this.ugi = UserGroupInformation.getCurrentUser();
nnAddr = NetUtils.createSocketAddr(name.toString());
// in case we open connection to hftp of a different cluster
// we need to know this cluster https port
// if it is not set we assume it is the same cluster or same port
int urlPort = conf.getInt("dfs.hftp.https.port", -1);
if(urlPort == -1)
urlPort = conf.getInt(DFSConfigKeys.DFS_HTTPS_PORT_KEY,
DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
String normalizedNN = NetUtils.normalizeHostName(name.getHost());
nnHttpUrl = buildUri("https://", normalizedNN ,urlPort);
LOG.debug("using url to get DT:" + nnHttpUrl);
this.nnAddr = getNamenodeAddr(name);
this.nnSecureAddr = getNamenodeSecureAddr(name);
try {
hftpURI = new URI(buildUri("hftp://", normalizedNN, urlPort));
} catch (URISyntaxException ue) {
throw new IOException("bad uri for hdfs", ue);
}
// if one uses RPC port different from the Default one,
// one should specify what is the service name for this delegation token
// otherwise it is hostname:RPC_PORT
String key = DelegationTokenSelector.SERVICE_NAME_KEY
+ SecurityUtil.buildDTServiceName(name,
DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
if(LOG.isDebugEnabled()) {
LOG.debug("Trying to find DT for " + name + " using key=" + key +
"; conf=" + conf.get(key, ""));
}
String nnServiceName = conf.get(key);
int nnPort = NameNode.DEFAULT_PORT;
if (nnServiceName != null) { // get the real port
nnPort = NetUtils.createSocketAddr(nnServiceName,
NameNode.DEFAULT_PORT).getPort();
}
try {
URI hdfsURI = new URI("hdfs://" + normalizedNN + ":" + nnPort);
hdfsServiceName = new Text(SecurityUtil.buildDTServiceName(hdfsURI,
nnPort));
} catch (URISyntaxException ue) {
throw new IOException("bad uri for hdfs", ue);
}
this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
null, null, null);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
if (UserGroupInformation.isSecurityEnabled()) {
//try finding a token for this namenode (esp applicable for tasks
//using hftp). If there exists one, just set the delegationField
String hftpServiceName = getCanonicalServiceName();
for (Token<? extends TokenIdentifier> t : ugi.getTokens()) {
Text kind = t.getKind();
if (DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind)) {
if (t.getService().equals(hdfsServiceName)) {
setDelegationToken(t);
break;
}
} else if (TOKEN_KIND.equals(kind)) {
if (hftpServiceName
.equals(normalizeService(t.getService().toString()))) {
setDelegationToken(t);
break;
}
}
}
//since we don't already have a token, go get one over https
if (delegationToken == null) {
setDelegationToken(getDelegationToken(null));
initDelegationToken();
}
}
protected void initDelegationToken() throws IOException {
// look for hftp token, then try hdfs
Token<?> token = selectHftpDelegationToken();
if (token == null) {
token = selectHdfsDelegationToken();
}
// if we don't already have a token, go get one over https
boolean createdToken = false;
if (token == null) {
token = getDelegationToken(null);
createdToken = (token != null);
}
// we already had a token or getDelegationToken() didn't fail.
if (token != null) {
setDelegationToken(token);
if (createdToken) {
dtRenewer.addRenewAction(this);
LOG.debug("Created new DT for " + token.getService());
} else {
LOG.debug("Found existing DT for " + token.getService());
}
}
}
private String normalizeService(String service) {
int colonIndex = service.indexOf(':');
if (colonIndex == -1) {
throw new IllegalArgumentException("Invalid service for hftp token: " +
service);
}
String hostname =
NetUtils.normalizeHostName(service.substring(0, colonIndex));
String port = service.substring(colonIndex + 1);
return hostname + ":" + port;
}
protected Token<DelegationTokenIdentifier> selectHftpDelegationToken() {
Text serviceName = SecurityUtil.buildTokenService(nnSecureAddr);
return hftpTokenSelector.selectToken(serviceName, ugi.getTokens());
}
protected Token<DelegationTokenIdentifier> selectHdfsDelegationToken() {
return DelegationTokenSelector.selectHdfsDelegationToken(
nnAddr, ugi, getConf());
@@ -245,13 +208,17 @@ public class HftpFileSystem extends FileSystem
}
@Override
public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
public synchronized <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
renewToken = token;
// emulate the 203 usage of the tokens
// by setting the kind and service as if they were hdfs tokens
delegationToken = new Token<T>(token);
// NOTE: the remote nn must be configured to use hdfs
delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
delegationToken.setService(hdfsServiceName);
// no need to change service because we aren't exactly sure what it
// should be. we can guess, but it might be wrong if the local conf
// value is incorrect. the service is a client side field, so the remote
// end does not care about the value
}
@Override
@@ -262,6 +229,7 @@ public class HftpFileSystem extends FileSystem
ugi.reloginFromKeytab();
return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
public Token<?> run() throws IOException {
final String nnHttpUrl = DFSUtil.createUri("https", nnSecureAddr).toString();
Credentials c;
try {
c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
@@ -291,12 +259,7 @@ public class HftpFileSystem extends FileSystem
@Override
public URI getUri() {
try {
return new URI("hftp", null, nnAddr.getHostName(), nnAddr.getPort(),
null, null, null);
} catch (URISyntaxException e) {
return null;
}
return hftpURI;
}
/**
@@ -722,11 +685,12 @@ public class HftpFileSystem extends FileSystem
public long renew(Token<?> token,
Configuration conf) throws IOException {
// update the kerberos credentials, if they are coming from a keytab
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
UserGroupInformation.getLoginUser().reloginFromKeytab();
// use https to renew the token
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
return
DelegationTokenFetcher.renewDelegationToken
("https://" + token.getService().toString(),
(DFSUtil.createUri("https", serviceAddr).toString(),
(Token<DelegationTokenIdentifier>) token);
}
@@ -737,10 +701,18 @@ public class HftpFileSystem extends FileSystem
// update the kerberos credentials, if they are coming from a keytab
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
// use https to cancel the token
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
DelegationTokenFetcher.cancelDelegationToken
("https://" + token.getService().toString(),
(DFSUtil.createUri("https", serviceAddr).toString(),
(Token<DelegationTokenIdentifier>) token);
}
}
private static class HftpDelegationTokenSelector
extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
public HftpDelegationTokenSelector() {
super(TOKEN_KIND);
}
}
}
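To summarize the HftpFileSystem changes: tokens are looked up by service address rather than by string munging, and a token obtained over https is kept twice, verbatim in renewToken for renew/cancel, and as a re-labelled copy in delegationToken for use against the namenode RPC port. A sketch of that dual-token bookkeeping, condensed from the code above rather than copied from it:

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    class DualTokenSketch {
      Token<?> renewToken;      // exactly as issued; renewed/cancelled over https
      Token<?> delegationToken; // re-labelled copy handed to the NN RPC layer

      synchronized <T extends TokenIdentifier> void set(Token<T> token) {
        renewToken = token;
        Token<T> copy = new Token<T>(token);
        // the remote NN validates this as an ordinary hdfs delegation token;
        // the service field is left alone because only the client reads it
        copy.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
        delegationToken = copy;
      }
    }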

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
View File

@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
@@ -120,6 +121,16 @@ public class HsftpFileSystem extends HftpFileSystem {
}
}
@Override
protected int getDefaultPort() {
return getDefaultSecurePort();
}
@Override
protected InetSocketAddress getNamenodeSecureAddr(URI uri) {
return getNamenodeAddr(uri);
}
@Override
protected HttpURLConnection openConnection(String path, String query)
throws IOException {
@@ -161,16 +172,6 @@ public class HsftpFileSystem extends HftpFileSystem {
return (HttpURLConnection) conn;
}
@Override
public URI getUri() {
try {
return new URI("hsftp", null, nnAddr.getHostName(), nnAddr.getPort(),
null, null, null);
} catch (URISyntaxException e) {
return null;
}
}
/**
* Dummy hostname verifier that is used to bypass hostname checking
*/
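The hftp/hsftp split in one place: hftp talks to the http port but pins token traffic to the configured https port, while hsftp is https throughout, so its secure address may take a port straight from the URI. A sketch of the two resolution rules, mirroring getNamenodeAddr/getNamenodeSecureAddr above; 50470 is assumed to be the 0.23 value of DFS_NAMENODE_HTTPS_PORT_DEFAULT:

    import java.net.InetSocketAddress;
    import java.net.URI;
    import org.apache.hadoop.net.NetUtils;

    class SecureAddrSketch {
      static final int HTTPS_DEFAULT = 50470; // assumed default

      // hftp: only the host comes from the uri; the port is always the
      // configured https port
      static InetSocketAddress hftpSecure(URI uri) {
        return NetUtils.createSocketAddrForHost(uri.getHost(), HTTPS_DEFAULT);
      }

      // hsftp: the authority may carry a port that overrides the default
      static InetSocketAddress hsftpSecure(URI uri) {
        return NetUtils.createSocketAddr(uri.getAuthority(), HTTPS_DEFAULT);
      }
    }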

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
View File

@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
@@ -296,8 +297,7 @@ public class DelegationTokenSecretManager
}
final InetSocketAddress addr = namenode.getNameNodeAddress();
final String s = addr.getAddress().getHostAddress() + ":" + addr.getPort();
token.setService(new Text(s));
SecurityUtil.setTokenService(token, addr);
final Credentials c = new Credentials();
c.addToken(new Text(ugi.getShortUserName()), token);
return c;

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
View File

@@ -62,6 +62,7 @@ import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authentication.util.KerberosName;
@@ -492,7 +493,7 @@ public class JspHelper {
return UserGroupInformation.createRemoteUser(strings[0]);
}
private static String getNNServiceAddress(ServletContext context,
private static InetSocketAddress getNNServiceAddress(ServletContext context,
HttpServletRequest request) {
String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS);
InetSocketAddress namenodeAddress = null;
@@ -503,8 +504,7 @@ public class JspHelper {
context);
}
if (namenodeAddress != null) {
return (namenodeAddress.getAddress().getHostAddress() + ":"
+ namenodeAddress.getPort());
return namenodeAddress;
}
return null;
}
@@ -547,9 +547,9 @@
Token<DelegationTokenIdentifier> token =
new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(tokenString);
String serviceAddress = getNNServiceAddress(context, request);
InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
if (serviceAddress != null) {
token.setService(new Text(serviceAddress));
SecurityUtil.setTokenService(token, serviceAddress);
token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
}
ByteArrayInputStream buf = new ByteArrayInputStream(token
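For readability, the decode-and-stamp sequence this hunk arrives at, pulled out of the servlet plumbing; the helper name is made up:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.token.Token;

    class UrlTokenSketch {
      // rebuild a token shipped as a URL parameter, then stamp its service
      // with whatever NN address the servlet context knows about
      static Token<DelegationTokenIdentifier> decode(String tokenString,
          InetSocketAddress nnAddr) throws IOException {
        Token<DelegationTokenIdentifier> token =
            new Token<DelegationTokenIdentifier>();
        token.decodeFromUrlString(tokenString);
        SecurityUtil.setTokenService(token, nnAddr);
        token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
        return token;
      }
    }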

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
View File

@@ -25,6 +25,7 @@ import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLConnection;
import java.security.PrivilegedExceptionAction;
@ -49,6 +50,7 @@ import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -201,7 +203,8 @@ public class DelegationTokenFetcher {
static public Credentials getDTfromRemote(String nnAddr,
String renewer) throws IOException {
DataInputStream dis = null;
InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnAddr);
try {
StringBuffer url = new StringBuffer();
if (renewer != null) {
@@ -221,9 +224,7 @@
ts.readFields(dis);
for(Token<?> token: ts.getAllTokens()) {
token.setKind(HftpFileSystem.TOKEN_KIND);
token.setService(new Text(SecurityUtil.buildDTServiceName
(remoteURL.toURI(),
DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
SecurityUtil.setTokenService(token, serviceAddr);
}
return ts;
} catch (Exception e) {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
View File

@@ -902,6 +902,8 @@ public class MiniDFSCluster {
if(dn == null)
throw new IOException("Cannot start DataNode in "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
//NOTE: the following is true if and only if:
// hadoop.security.token.service.use_ip=true
//since the HDFS does things based on IP:port, we need to add the mapping
//for IP:port to rackId
String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
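The NOTE matters because the rack mapping keyed on ipAddr + ":" + port only lines up with token services when IP-based services are in effect. A hedged guard one could write; the property key is the real hadoop.security.token.service.use_ip, the helper itself is illustrative:

    import org.apache.hadoop.conf.Configuration;

    class UseIpSketch {
      // the ip:port -> rackId mapping assumes IP-based token services,
      // i.e. hadoop.security.token.service.use_ip left at its default (true)
      static boolean ipBasedServices(Configuration conf) {
        return conf.getBoolean("hadoop.security.token.service.use_ip", true);
      }
    }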

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URI;
import java.net.URL;
import java.net.HttpURLConnection;
import java.util.Random;
@@ -232,4 +233,164 @@ public class TestHftpFileSystem {
in.seek(7);
assertEquals('7', in.read());
}
public void resetFileSystem() throws IOException {
// filesystem caching has a quirk/bug that it caches based on the user's
// given uri. the result is if a filesystem is instantiated with no port,
// it gets the default port. then if the default port is changed,
// and another filesystem is instantiated with no port, the prior fs
// is returned, not a new one using the changed port. so let's flush
// the cache between tests...
FileSystem.closeAll();
}
@Test
public void testHftpDefaultPorts() throws IOException {
resetFileSystem();
Configuration conf = new Configuration();
URI uri = URI.create("hftp://localhost");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
assertEquals(uri, fs.getUri());
assertEquals(
"127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
fs.getCanonicalServiceName()
);
}
@Test
public void testHftpCustomDefaultPorts() throws IOException {
resetFileSystem();
Configuration conf = new Configuration();
conf.setInt("dfs.http.port", 123);
conf.setInt("dfs.https.port", 456);
URI uri = URI.create("hftp://localhost");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
assertEquals(123, fs.getDefaultPort());
assertEquals(456, fs.getDefaultSecurePort());
assertEquals(uri, fs.getUri());
assertEquals(
"127.0.0.1:456",
fs.getCanonicalServiceName()
);
}
@Test
public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
resetFileSystem();
Configuration conf = new Configuration();
URI uri = URI.create("hftp://localhost:123");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
assertEquals(uri, fs.getUri());
assertEquals(
"127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
fs.getCanonicalServiceName()
);
}
@Test
public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
resetFileSystem();
Configuration conf = new Configuration();
conf.setInt("dfs.http.port", 123);
conf.setInt("dfs.https.port", 456);
URI uri = URI.create("hftp://localhost:789");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
assertEquals(123, fs.getDefaultPort());
assertEquals(456, fs.getDefaultSecurePort());
assertEquals(uri, fs.getUri());
assertEquals(
"127.0.0.1:456",
fs.getCanonicalServiceName()
);
}
///
@Test
public void testHsftpDefaultPorts() throws IOException {
resetFileSystem();
Configuration conf = new Configuration();
URI uri = URI.create("hsftp://localhost");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
assertEquals(uri, fs.getUri());
assertEquals(
"127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
fs.getCanonicalServiceName()
);
}
@Test
public void testHsftpCustomDefaultPorts() throws IOException {
resetFileSystem();
Configuration conf = new Configuration();
conf.setInt("dfs.http.port", 123);
conf.setInt("dfs.https.port", 456);
URI uri = URI.create("hsftp://localhost");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
assertEquals(456, fs.getDefaultPort());
assertEquals(456, fs.getDefaultSecurePort());
assertEquals(uri, fs.getUri());
assertEquals(
"127.0.0.1:456",
fs.getCanonicalServiceName()
);
}
@Test
public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
resetFileSystem();
Configuration conf = new Configuration();
URI uri = URI.create("hsftp://localhost:123");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
assertEquals(uri, fs.getUri());
assertEquals(
"127.0.0.1:123",
fs.getCanonicalServiceName()
);
}
@Test
public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
resetFileSystem();
Configuration conf = new Configuration();
conf.setInt("dfs.http.port", 123);
conf.setInt("dfs.https.port", 456);
URI uri = URI.create("hsftp://localhost:789");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
assertEquals(456, fs.getDefaultPort());
assertEquals(456, fs.getDefaultSecurePort());
assertEquals(uri, fs.getUri());
assertEquals(
"127.0.0.1:789",
fs.getCanonicalServiceName()
);
}
}

hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml
View File

@@ -0,0 +1,26 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!-- Turn off SSL server authentication for tests by default -->
<property>
<name>ssl.client.do.not.authenticate.server</name>
<value>true</value>
</property>
</configuration>