From c80dbe5e09ab1eb3c1b0277055f28717895d6dd9 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 17 Apr 2012 22:21:33 +0000
Subject: [PATCH] HDFS-2652. Add support for host-based delegation tokens.
 Contributed by Daryn Sharp

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327309 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../java/org/apache/hadoop/hdfs/HAUtil.java   | 32 ++++++----
 .../apache/hadoop/hdfs/HftpFileSystem.java    | 30 +++++----
 .../delegation/DelegationTokenSelector.java   | 37 ++++++-----
 .../ha/ConfiguredFailoverProxyProvider.java   | 14 +++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    | 38 +++++++++---
 .../apache/hadoop/hdfs/MiniDFSCluster.java    | 15 +++--
 .../hadoop/hdfs/TestHftpDelegationToken.java  | 56 +++++++++++++++++
 ...TestClientProtocolWithDelegationToken.java |  7 +--
 .../ha/TestDelegationTokensWithHA.java        | 51 ++++++++++++---
 .../hadoop/hdfs/web/TestWebHdfsUrl.java       | 62 +++++++++++++++++++
 11 files changed, 266 insertions(+), 79 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 60ee1cc64d4..b13218a19af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -860,6 +860,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-3176. Use MD5MD5CRC32FileChecksum.readFields() in JsonUtil . (Kihwal
     Lee via szetszwo)
 
+    HDFS-2652. Add support for host-based delegation tokens. (Daryn Sharp via
+    szetszwo)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
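For readers new to token services: Hadoop matches a delegation token by its service string, which SecurityUtil builds from an address as either ip:port or host:port depending on hadoop.security.token.service.use_ip. "Host-based delegation tokens" simply means the host:port form. A minimal sketch of the difference, using the test-only toggle that this patch's own tests rely on (the address is hypothetical):

import java.net.InetSocketAddress;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.SecurityUtilTestHelper;

public class TokenServiceExample {
  public static void main(String[] args) {
    // createSocketAddrForHost keeps the literal hostname rather than
    // normalizing it away, which is what host-based services rely on
    InetSocketAddress nnAddr =
        NetUtils.createSocketAddrForHost("localhost", 8020);

    // IP-based naming: the service looks like "127.0.0.1:8020"
    SecurityUtilTestHelper.setTokenServiceUseIp(true);
    Text ipService = SecurityUtil.buildTokenService(nnAddr);

    // host-based naming: the service looks like "localhost:8020"
    SecurityUtilTestHelper.setTokenServiceUseIp(false);
    Text hostService = SecurityUtil.buildTokenService(nnAddr);

    System.out.println(ipService + " vs " + hostService);
  }
}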
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 943f47497c5..b56892537b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -52,6 +52,9 @@ public class HAUtil {
   private static final Log LOG =
     LogFactory.getLog(HAUtil.class);
 
+  private static final DelegationTokenSelector tokenSelector =
+      new DelegationTokenSelector();
+
   private HAUtil() { /* Hidden constructor */ }
 
   /**
@@ -241,25 +244,28 @@ public class HAUtil {
    * one is found, clone it to also represent the underlying namenode address.
    * @param ugi the UGI to modify
    * @param haUri the logical URI for the cluster
-   * @param singleNNAddr one of the NNs in the cluster to which the token
+   * @param nnAddrs collection of NNs in the cluster to which the token
    * applies
    */
   public static void cloneDelegationTokenForLogicalUri(
       UserGroupInformation ugi, URI haUri,
-      InetSocketAddress singleNNAddr) {
-    Text haService = buildTokenServiceForLogicalUri(haUri);
+      Collection<InetSocketAddress> nnAddrs) {
+    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
     Token<DelegationTokenIdentifier> haToken =
-        DelegationTokenSelector.selectHdfsDelegationToken(haService, ugi);
-    if (haToken == null) {
-      // no token
-      return;
+        tokenSelector.selectToken(haService, ugi.getTokens());
+    if (haToken != null) {
+      for (InetSocketAddress singleNNAddr : nnAddrs) {
+        Token<DelegationTokenIdentifier> specificToken =
+            new Token<DelegationTokenIdentifier>(haToken);
+        SecurityUtil.setTokenService(specificToken, singleNNAddr);
+        ugi.addToken(specificToken);
+        LOG.debug("Mapped HA service delegation token for logical URI " +
+            haUri + " to namenode " + singleNNAddr);
+      }
+    } else {
+      LOG.debug("No HA service delegation token found for logical URI " +
+          haUri);
     }
-    Token<DelegationTokenIdentifier> specificToken =
-        new Token<DelegationTokenIdentifier>(haToken);
-    specificToken.setService(SecurityUtil.buildTokenService(singleNNAddr));
-    ugi.addToken(specificToken);
-    LOG.debug("Mapped HA service delegation token for logical URI " +
-        haUri + " to namenode " + singleNNAddr);
   }
 
   /**
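With the collection-based signature above, one call clones a logical-URI token across all physical namenode addresses. A minimal usage sketch, assuming the UGI already holds a token whose service is the logical URI's (the hostnames are hypothetical; this mirrors the usage in TestDelegationTokensWithHA further down):

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Collection;
import java.util.HashSet;

import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class CloneHaTokenExample {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    URI haUri = URI.create("hdfs://my-ha-uri/");

    // the physical namenodes behind the logical URI (hypothetical)
    Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
    nnAddrs.add(new InetSocketAddress("nn1.example.com", 8020));
    nnAddrs.add(new InetSocketAddress("nn2.example.com", 8020));

    // maps the logical-URI token onto every physical address at once;
    // a no-op if the UGI holds no token for the logical URI
    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
  }
}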
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
index 829190623a8..7151e9f9472 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
@@ -30,6 +30,7 @@ import java.security.PrivilegedExceptionAction;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.TimeZone;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -48,7 +49,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
 import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.Text;
@@ -168,10 +168,7 @@ public class HftpFileSystem extends FileSystem
 
   protected void initDelegationToken() throws IOException {
     // look for hftp token, then try hdfs
-    Token<?> token = selectHftpDelegationToken();
-    if (token == null) {
-      token = selectHdfsDelegationToken();
-    }
+    Token<?> token = selectDelegationToken();
 
     // if we don't already have a token, go get one over https
     boolean createdToken = false;
@@ -192,14 +189,8 @@ public class HftpFileSystem extends FileSystem
     }
   }
 
-  protected Token<DelegationTokenIdentifier> selectHftpDelegationToken() {
-    Text serviceName = SecurityUtil.buildTokenService(nnSecureAddr);
-    return hftpTokenSelector.selectToken(serviceName, ugi.getTokens());
-  }
-
-  protected Token<?> selectHdfsDelegationToken() {
-    return DelegationTokenSelector.selectHdfsDelegationToken(
-        nnAddr, ugi, getConf());
+  protected Token<DelegationTokenIdentifier> selectDelegationToken() {
+    return hftpTokenSelector.selectToken(getUri(), ugi.getTokens(), getConf());
   }
 
@@ -699,9 +690,22 @@ public class HftpFileSystem extends FileSystem
 
   private static class HftpDelegationTokenSelector
   extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+    private static final DelegationTokenSelector hdfsTokenSelector =
+        new DelegationTokenSelector();
 
     public HftpDelegationTokenSelector() {
       super(TOKEN_KIND);
     }
+
+    Token<DelegationTokenIdentifier> selectToken(URI nnUri,
+        Collection<Token<? extends TokenIdentifier>> tokens, Configuration conf) {
+      Token<DelegationTokenIdentifier> token =
+          selectToken(SecurityUtil.buildTokenService(nnUri), tokens);
+      if (token == null) {
+        // try to get a HDFS token
+        token = hdfsTokenSelector.selectToken(nnUri, tokens, conf);
+      }
+      return token;
+    }
   }
 }
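The selector above is the patch's recurring shape: look for a token of the filesystem's own kind first, then fall back to an HDFS RPC token for the same service. A self-contained distillation of that pattern, not the actual Hadoop implementation (the real one delegates kind matching to AbstractDelegationTokenSelector; the helper names here are invented):

import java.util.Collection;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

public class FallbackSelection {
  /** Return the first token whose kind and service both match, else null. */
  static Token<?> find(Collection<Token<?>> tokens, Text kind, Text service) {
    for (Token<?> t : tokens) {
      if (t.getKind().equals(kind) && t.getService().equals(service)) {
        return t;
      }
    }
    return null;
  }

  /** Prefer the filesystem's own kind, then fall back to the RPC kind. */
  static Token<?> select(Collection<Token<?>> tokens,
      Text fsKind, Text rpcKind, Text service) {
    Token<?> token = find(tokens, fsKind, service);
    return (token != null) ? token : find(tokens, rpcKind, service);
  }
}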
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
index 4f73b851645..293611e377f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.security.token.delegation;
 
-import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.Collection;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -25,7 +26,6 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 
@@ -37,32 +37,35 @@ public class DelegationTokenSelector
     extends AbstractDelegationTokenSelector<DelegationTokenIdentifier>{
   public static final String SERVICE_NAME_KEY = "hdfs.service.host_";
 
-  private static final DelegationTokenSelector INSTANCE = new DelegationTokenSelector();
-
-  /** Select the delegation token for hdfs from the ugi. */
-  public static Token<DelegationTokenIdentifier> selectHdfsDelegationToken(
-      final InetSocketAddress nnAddr, final UserGroupInformation ugi,
+  /**
+   * Select the delegation token for hdfs. The port will be rewritten to
+   * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port.
+   * This method should only be called by non-hdfs filesystems that do not
+   * use the rpc port to acquire tokens. Ex. webhdfs, hftp
+   * @param nnUri of the remote namenode
+   * @param tokens as a collection
+   * @param conf hadoop configuration
+   * @return Token
+   */
+  public Token<DelegationTokenIdentifier> selectToken(
+      final URI nnUri, Collection<Token<? extends TokenIdentifier>> tokens,
       final Configuration conf) {
     // this guesses the remote cluster's rpc service port.
     // the current token design assumes it's the same as the local cluster's
     // rpc port unless a config key is set. there should be a way to
     // automatically and correctly determine the value
-    final String key = SERVICE_NAME_KEY + SecurityUtil.buildTokenService(nnAddr);
-    final String nnServiceName = conf.get(key);
+    Text serviceName = SecurityUtil.buildTokenService(nnUri);
+    final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
 
     int nnRpcPort = NameNode.DEFAULT_PORT;
     if (nnServiceName != null) {
       nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
     }
+    // use original hostname from the uri to avoid unintentional host resolving
+    serviceName = SecurityUtil.buildTokenService(
+        NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));
 
-    final Text serviceName = SecurityUtil.buildTokenService(
-        new InetSocketAddress(nnAddr.getHostName(), nnRpcPort));
-    return INSTANCE.selectToken(serviceName, ugi.getTokens());
-  }
-
-  public static Token<DelegationTokenIdentifier> selectHdfsDelegationToken(
-      Text serviceName, UserGroupInformation ugi) {
-    return INSTANCE.selectToken(serviceName, ugi.getTokens());
+    return selectToken(serviceName, tokens);
   }
 
   public DelegationTokenSelector() {
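The hdfs.service.host_ key is how a client tells the selector that a remote cluster's namenode RPC port differs from the local default. A hedged sketch of the lookup it drives, assuming host-based services are enabled (hostnames and ports are made up):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class PortRewriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hypothetical remote cluster whose namenode RPC listens on 9000;
    // the key is SERVICE_NAME_KEY plus the token service of the web URI
    conf.set(DelegationTokenSelector.SERVICE_NAME_KEY + "nn.example.com:50070",
        "nn.example.com:9000");

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // the selector searches the UGI for a token whose service is
    // "nn.example.com:9000" rather than the default-port guess ":8020"
    Token<DelegationTokenIdentifier> token = new DelegationTokenSelector()
        .selectToken(URI.create("hftp://nn.example.com:50070"),
            ugi.getTokens(), conf);
    System.out.println(token);
  }
}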
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index a20880aad65..eab36481e1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
@@ -93,14 +94,15 @@ public class ConfiguredFailoverProxyProvider<T> implements
             "for URI " + uri);
       }
 
-      for (InetSocketAddress address : addressesInNN.values()) {
+      Collection<InetSocketAddress> addressesOfNns = addressesInNN.values();
+      for (InetSocketAddress address : addressesOfNns) {
         proxies.add(new AddressRpcProxyPair<T>(address));
-
-        // The client may have a delegation token set for the logical
-        // URI of the cluster. Clone this token to apply to each of the
-        // underlying IPC addresses so that the IPC code can find it.
-        HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, address);
       }
+
+      // The client may have a delegation token set for the logical
+      // URI of the cluster. Clone this token to apply to each of the
+      // underlying IPC addresses so that the IPC code can find it.
+      HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index c64dfb14e8a..415bf6c12dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -29,6 +29,7 @@ import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
@@ -117,8 +118,8 @@ public class WebHdfsFileSystem extends FileSystem
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
   /** Token selector */
-  public static final AbstractDelegationTokenSelector<DelegationTokenIdentifier> DT_SELECTOR
-      = new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(TOKEN_KIND) {};
+  public static final WebHdfsDelegationTokenSelector DT_SELECTOR
+      = new WebHdfsDelegationTokenSelector();
 
   private static DelegationTokenRenewer<WebHdfsFileSystem> DT_RENEWER = null;
 
@@ -164,7 +165,7 @@ public class WebHdfsFileSystem extends FileSystem
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
-    this.nnAddr = NetUtils.createSocketAddr(uri.toString());
+    this.nnAddr = NetUtils.createSocketAddrForHost(uri.getHost(), uri.getPort());
     this.workingDir = getHomeDirectory();
 
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -174,12 +175,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   protected void initDelegationToken() throws IOException {
     // look for webhdfs token, then try hdfs
-    final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
-    Token<?> token = DT_SELECTOR.selectToken(serviceName, ugi.getTokens());
-    if (token == null) {
-      token = DelegationTokenSelector.selectHdfsDelegationToken(
-          nnAddr, ugi, getConf());
-    }
+    Token<?> token = selectDelegationToken();
 
     //since we don't already have a token, go get one
     boolean createdToken = false;
@@ -200,6 +196,10 @@ public class WebHdfsFileSystem extends FileSystem
     }
   }
 
+  protected Token<DelegationTokenIdentifier> selectDelegationToken() {
+    return DT_SELECTOR.selectToken(getUri(), ugi.getTokens(), getConf());
+  }
+
   @Override
   protected int getDefaultPort() {
     return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
@@ -845,4 +845,24 @@ public class WebHdfsFileSystem extends FileSystem
       }
     }
   }
+
+  private static class WebHdfsDelegationTokenSelector
+  extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+    private static final DelegationTokenSelector hdfsTokenSelector =
+        new DelegationTokenSelector();
+
+    public WebHdfsDelegationTokenSelector() {
+      super(TOKEN_KIND);
+    }
+
+    Token<DelegationTokenIdentifier> selectToken(URI nnUri,
+        Collection<Token<? extends TokenIdentifier>> tokens, Configuration conf) {
+      Token<DelegationTokenIdentifier> token =
+          selectToken(SecurityUtil.buildTokenService(nnUri), tokens);
+      if (token == null) {
+        token = hdfsTokenSelector.selectToken(nnUri, tokens, conf);
+      }
+      return token;
+    }
+  }
 }
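The nnAddr change above matters for host-based tokens: building the address from the URI's host and port via createSocketAddrForHost preserves the literal hostname instead of normalizing it through string parsing and resolution, so the token service built from it can stay host-based. A small sketch (the hostname is hypothetical):

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.net.NetUtils;

public class AddrFromUriExample {
  public static void main(String[] args) {
    URI uri = URI.create("webhdfs://nn.example.com:50070");

    // take host and port straight from the URI; the returned address
    // keeps "nn.example.com" as its hostname even if it never resolves
    InetSocketAddress nnAddr =
        NetUtils.createSocketAddrForHost(uri.getHost(), uri.getPort());

    System.out.println(nnAddr.getHostName() + ":" + nnAddr.getPort());
  }
}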
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 2f1d992005d..edab4710607 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.util.StringUtils;
@@ -1049,16 +1050,14 @@ public class MiniDFSCluster {
       if(dn == null)
         throw new IOException("Cannot start DataNode in "
             + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
-      //NOTE: the following is true if and only if:
-      //      hadoop.security.token.service.use_ip=true
-      //since the HDFS does things based on IP:port, we need to add the mapping
-      //for IP:port to rackId
-      String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
+      //since the HDFS does things based on host|ip:port, we need to add the
+      //mapping for the service to rackId
+      String service =
+          SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
       if (racks != null) {
-        int port = dn.getXferAddress().getPort();
-        LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
+        LOG.info("Adding node with service : " + service +
             " to rack " + racks[i-curDatanodesNum]);
-        StaticMapping.addNodeToRack(ipAddr + ":" + port,
+        StaticMapping.addNodeToRack(service,
             racks[i-curDatanodesNum]);
       }
       dn.runDatanodeDaemon();
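The rack mapping is now keyed by the same string the cluster uses as a token service, so rack lookups agree with whichever use_ip setting is active. A brief sketch of that keying (the address and rack are hypothetical):

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.StaticMapping;
import org.apache.hadoop.security.SecurityUtil;

public class RackKeyExample {
  public static void main(String[] args) {
    // a datanode transfer address (hypothetical)
    InetSocketAddress xferAddr =
        NetUtils.createSocketAddrForHost("localhost", 50010);

    // key the rack by the token-service form of the address, so the
    // mapping matches however hadoop.security.token.service.use_ip is set
    String service = SecurityUtil.buildTokenService(xferAddr).toString();
    StaticMapping.addNodeToRack(service, "/rack1");
  }
}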
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
index f13d5194d75..e4071222410 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -66,4 +67,59 @@ public class TestHftpDelegationToken {
     renewToken.setAccessible(true);
     assertSame("wrong token", token, renewToken.get(fs));
   }
+
+  @Test
+  public void testSelectHdfsDelegationToken() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+    Configuration conf = new Configuration();
+    URI hftpUri = URI.create("hftp://localhost:0");
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    Token<?> token = null;
+
+    // test fallback to hdfs token
+    Token<?> hdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+        new Text("127.0.0.1:8020"));
+    ugi.addToken(hdfsToken);
+
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(hftpUri, conf);
+    token = fs.selectDelegationToken();
+    assertNotNull(token);
+    assertEquals(hdfsToken, token);
+
+    // test hftp is favored over hdfs
+    Token<?> hftpToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        HftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:0"));
+    ugi.addToken(hftpToken);
+    token = fs.selectDelegationToken();
+    assertNotNull(token);
+    assertEquals(hftpToken, token);
+
+    // switch to using host-based tokens, no token should match
+    SecurityUtilTestHelper.setTokenServiceUseIp(false);
+    token = fs.selectDelegationToken();
+    assertNull(token);
+
+    // test fallback to hdfs token
+    hdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+        new Text("localhost:8020"));
+    ugi.addToken(hdfsToken);
+    token = fs.selectDelegationToken();
+    assertNotNull(token);
+    assertEquals(hdfsToken, token);
+
+    // test hftp is favored over hdfs
+    hftpToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        HftpFileSystem.TOKEN_KIND, new Text("localhost:0"));
+    ugi.addToken(hftpToken);
+    token = fs.selectDelegationToken();
+    assertNotNull(token);
+    assertEquals(hftpToken, token);
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
index 5f4696e144f..e54b8bccc28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SaslInputStream;
 import org.apache.hadoop.security.SaslRpcClient;
 import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.log4j.Level;
@@ -91,10 +92,8 @@ public class TestClientProtocolWithDelegationToken {
     DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
     Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
         dtId, sm);
-    Text host = new Text(addr.getAddress().getHostAddress() + ":"
-        + addr.getPort());
-    token.setService(host);
-    LOG.info("Service IP address for token is " + host);
+    SecurityUtil.setTokenService(token, addr);
+    LOG.info("Service for token is " + token.getService());
     current.addToken(token);
     current.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 5c380915d04..a69a0ce267b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
+import java.util.HashSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,11 +45,13 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -100,6 +103,11 @@ public class TestDelegationTokensWithHA {
   }
 
+  @Before
+  public void prepTest() {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+  }
+
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
     Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
@@ -187,23 +195,48 @@ public class TestDelegationTokensWithHA {
     URI haUri = new URI("hdfs://my-ha-uri/");
     token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
     ugi.addToken(token);
-    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nn0.getNameNodeAddress());
-    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nn1.getNameNodeAddress());
+
+    Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
+    nnAddrs.add(nn0.getNameNodeAddress());
+    nnAddrs.add(nn1.getNameNodeAddress());
+    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
 
     Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
     assertEquals(3, tokens.size());
 
     LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens));
+    DelegationTokenSelector dts = new DelegationTokenSelector();
 
     // check that the token selected for one of the physical IPC addresses
     // matches the one we received
-    InetSocketAddress addr = nn0.getNameNodeAddress();
-    Text ipcDtService = SecurityUtil.buildTokenService(addr);
-    Token<DelegationTokenIdentifier> token2 =
-        DelegationTokenSelector.selectHdfsDelegationToken(ipcDtService, ugi);
-    assertNotNull(token2);
-    assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
-    assertArrayEquals(token.getPassword(), token2.getPassword());
+    for (InetSocketAddress addr : nnAddrs) {
+      Text ipcDtService = SecurityUtil.buildTokenService(addr);
+      Token<DelegationTokenIdentifier> token2 =
+          dts.selectToken(ipcDtService, ugi.getTokens());
+      assertNotNull(token2);
+      assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
+      assertArrayEquals(token.getPassword(), token2.getPassword());
+    }
+
+    // switch to host-based tokens, shouldn't match existing tokens
+    SecurityUtilTestHelper.setTokenServiceUseIp(false);
+    for (InetSocketAddress addr : nnAddrs) {
+      Text ipcDtService = SecurityUtil.buildTokenService(addr);
+      Token<DelegationTokenIdentifier> token2 =
+          dts.selectToken(ipcDtService, ugi.getTokens());
+      assertNull(token2);
+    }
+
+    // reclone the tokens, and see if they match now
+    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
+    for (InetSocketAddress addr : nnAddrs) {
+      Text ipcDtService = SecurityUtil.buildTokenService(addr);
+      Token<DelegationTokenIdentifier> token2 =
+          dts.selectToken(ipcDtService, ugi.getTokens());
+      assertNotNull(token2);
+      assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
+      assertArrayEquals(token.getPassword(), token2.getPassword());
+    }
   }
 
   /**
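The mode switch in the middle of this test is the behavioral crux: token selection is a pure string match on the service field, so clones made under IP naming go unmatched once lookups build host-based names, until the logical token is recloned. A hedged distillation using only APIs from this patch (it assumes the UGI already carries a token for the logical URI, otherwise cloneDelegationTokenForLogicalUri is a no-op; the address is hypothetical):

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Collections;

import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.SecurityUtilTestHelper;
import org.apache.hadoop.security.UserGroupInformation;

public class RecloneSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    URI haUri = URI.create("hdfs://my-ha-uri/");
    InetSocketAddress nn = new InetSocketAddress("localhost", 8020);
    DelegationTokenSelector dts = new DelegationTokenSelector();

    // clone while services are IP-based: clones are keyed by "127.0.0.1:8020"
    SecurityUtilTestHelper.setTokenServiceUseIp(true);
    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, Collections.singleton(nn));

    // flip to host-based naming: lookups now build "localhost:8020"
    SecurityUtilTestHelper.setTokenServiceUseIp(false);
    Text hostService = SecurityUtil.buildTokenService(nn);
    System.out.println(dts.selectToken(hostService, ugi.getTokens())); // null

    // recloning under the new naming makes the lookup hit again
    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, Collections.singleton(nn));
    System.out.println(dts.selectToken(hostService, ugi.getTokens())); // non-null
  }
}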
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index ec90146d608..1dde0997dd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -34,10 +34,16 @@ import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.junit.Assert;
 import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.mockito.Mockito.mock;
 
 public class TestWebHdfsUrl {
@@ -90,4 +96,60 @@ public class TestWebHdfsUrl {
   private String generateUrlQueryPrefix(HttpOpParam.Op op, String username) {
     return "op=" + op.toString() + "&user.name=" + username;
   }
+
+  @Test
+  public void testSelectDelegationToken() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+    Configuration conf = new Configuration();
+    URI webHdfsUri = URI.create("webhdfs://localhost:0");
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    Token<?> token = null;
+
+    // test fallback to hdfs token
+    Token<?> hdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+        new Text("127.0.0.1:8020"));
+    ugi.addToken(hdfsToken);
+
+    WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(webHdfsUri, conf);
+    token = fs.selectDelegationToken();
+    assertNotNull(token);
+    assertEquals(hdfsToken, token);
+
+    // test webhdfs is favored over hdfs
+    Token<?> webHdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        WebHdfsFileSystem.TOKEN_KIND, new Text("127.0.0.1:0"));
+    ugi.addToken(webHdfsToken);
+    token = fs.selectDelegationToken();
+    assertNotNull(token);
+    assertEquals(webHdfsToken, token);
+
+    // switch to using host-based tokens, no token should match
+    SecurityUtilTestHelper.setTokenServiceUseIp(false);
+    token = fs.selectDelegationToken();
+    assertNull(token);
+
+    // test fallback to hdfs token
+    hdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+        new Text("localhost:8020"));
+    ugi.addToken(hdfsToken);
+    token = fs.selectDelegationToken();
+    assertNotNull(token);
+    assertEquals(hdfsToken, token);
+
+    // test webhdfs is favored over hdfs
+    webHdfsToken = new Token<TokenIdentifier>(
+        new byte[0], new byte[0],
+        WebHdfsFileSystem.TOKEN_KIND, new Text("localhost:0"));
+    ugi.addToken(webHdfsToken);
+    token = fs.selectDelegationToken();
+    assertNotNull(token);
+    assertEquals(webHdfsToken, token);
+  }
+
 }