diff --git a/mapreduce/CHANGES.txt b/mapreduce/CHANGES.txt
index 9d0f0be8f7a..8acda6dba7c 100644
--- a/mapreduce/CHANGES.txt
+++ b/mapreduce/CHANGES.txt
@@ -399,6 +399,9 @@ Trunk (unreleased changes)
     MAPREDUCE-2541. Fixed a race condition in IndexCache.removeMap.
     (Binglin Chang via acmurthy)
 
+    MAPREDUCE-2839. Fixed TokenCache to get delegation tokens using both new
+    and old apis. (Siddharth Seth via acmurthy)
+
 Release 0.22.0 - Unreleased
diff --git a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/mapreduce/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java
index 0a42f5179d1..6fd05feee04 100644
--- a/mapreduce/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ b/mapreduce/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -19,7 +19,9 @@ package org.apache.hadoop.mapreduce.security;
 
 import java.io.IOException;
-import java.net.URI;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -28,10 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobTracker;
@@ -139,6 +138,16 @@ public class TokenCache {
         return;
       }
     }
+    List<Token<?>> tokens = fs.getDelegationTokens(delegTokenRenewer);
+    if (tokens != null) {
+      for (Token<?> token : tokens) {
+        credentials.addToken(token.getService(), token);
+        LOG.info("Got dt for " + fs.getUri() + ";uri="+ fsName +
+            ";t.service="+token.getService());
+      }
+    }
+    //Call getDelegationToken as well for now - for FS implementations
+    // which may not have implemented getDelegationTokens (hftp)
     Token<?> token = fs.getDelegationToken(delegTokenRenewer);
     if (token != null) {
       Text fsNameText = new Text(fsName);
diff --git a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
index 8e9cb2cb591..243a8e4ca07 100644
--- a/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
+++ b/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
@@ -30,7 +30,9 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.NoSuchAlgorithmException;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import javax.crypto.KeyGenerator;
@@ -38,8 +40,10 @@ import javax.crypto.spec.SecretKeySpec;
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -149,6 +153,7 @@ public class TestTokenCache {
 
   @BeforeClass
   public static void setUp() throws Exception {
+
     Configuration conf = new Configuration();
     conf.set("hadoop.security.auth_to_local", "RULE:[2:$1]");
     dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
@@ -334,6 +339,14 @@ public class TestTokenCache {
         return t;
       }}).when(hfs).getDelegationToken(renewer);
 
+    //when(hfs.getDelegationTokens()).thenReturn((Token) t);
+    Mockito.doAnswer(new Answer<List<Token<?>>>(){
+      @Override
+      public List<Token<?>> answer(InvocationOnMock invocation)
+          throws Throwable {
+        return Collections.singletonList(t);
+      }}).when(hfs).getDelegationTokens(renewer);
+
     //when(hfs.getCanonicalServiceName).thenReturn(fs_addr);
     Mockito.doAnswer(new Answer<String>(){
       @Override
@@ -378,4 +391,40 @@ public class TestTokenCache {
     assertEquals("Failed to substitute HOSTNAME_PATTERN with hostName",
         serviceName + hostName + domainName, TokenCache.getJTPrincipal(conf));
   }
+
+  @Test
+  public void testGetTokensForViewFS() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration(jConf);
+    FileSystem dfs = dfsCluster.getFileSystem();
+    String serviceName = dfs.getCanonicalServiceName();
+
+    Path p1 = new Path("/mount1");
+    Path p2 = new Path("/mount2");
+    p1 = dfs.makeQualified(p1);
+    p2 = dfs.makeQualified(p2);
+
+    conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
+    conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
+    Credentials credentials = new Credentials();
+    Path lp1 = new Path("viewfs:///dir1");
+    Path lp2 = new Path("viewfs:///dir2");
+    Path[] paths = new Path[2];
+    paths[0] = lp1;
+    paths[1] = lp2;
+    TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
+
+    Collection<Token<? extends TokenIdentifier>> tns =
+        credentials.getAllTokens();
+    assertEquals("number of tokens is not 1", 1, tns.size());
+
+    boolean found = false;
+    for (Token<? extends TokenIdentifier> tt : tns) {
+      System.out.println("token=" + tt);
+      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
+          && tt.getService().equals(new Text(serviceName))) {
+        found = true;
+      }
+      assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
+    }
+  }
 }