diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e4bdf258277..23536759fcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.0.0 - UNRELEASED
 
     HDFS-3280. DFSOutputStream.sync should not be synchronized (todd)
 
+    HDFS-3268. FileContext API mishandles token service and incompatible with
+    HA (Daryn Sharp via todd)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 82d0c3663cc..a3217aa7e93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -391,11 +390,15 @@ public class Hdfs extends AbstractFileSystem {
     return new Path(dfs.getLinkTarget(getUriPath(p)));
   }
 
+  @Override
+  public String getCanonicalServiceName() {
+    return dfs.getCanonicalServiceName();
+  }
+
   @Override //AbstractFileSystem
   public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
     Token<DelegationTokenIdentifier> result = dfs
         .getDelegationToken(renewer == null ? null : new Text(renewer));
-    result.setService(new Text(this.getCanonicalServiceName()));
     List<Token<?>> tokenList = new ArrayList<Token<?>>();
     tokenList.add(result);
     return tokenList;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 5b533ef96d1..c28e3a3719d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -641,6 +641,16 @@ public class DFSClient implements java.io.Closeable {
     return serverDefaults;
   }
 
+  /**
+   * Get a canonical token service name for this client's tokens. Null should
+   * be returned if the client is not using tokens.
+   * @return the token service for the client
+   */
+  @InterfaceAudience.LimitedPrivate( { "HDFS" })
+  public String getCanonicalServiceName() {
+    return (dtService != null) ? dtService.toString() : null;
+  }
+
   /**
    * @see ClientProtocol#getDelegationToken(Text)
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index d335aa8b71b..988a6e7ee3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -848,12 +848,7 @@ public class DistributedFileSystem extends FileSystem {
    */
   @Override
   public String getCanonicalServiceName() {
-    URI uri = getUri();
-    if (HAUtil.isLogicalUri(getConf(), uri)) {
-      return HAUtil.buildTokenServiceForLogicalUri(uri).toString();
-    } else {
-      return super.getCanonicalServiceName();
-    }
+    return dfs.getCanonicalServiceName();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index f7755814c4d..5c380915d04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -30,6 +30,7 @@ import java.util.Collection;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -223,6 +224,21 @@ public class TestDelegationTokensWithHA {
     token.cancel(dfs.getConf());
   }
 
+  @Test
+  public void testHdfsGetCanonicalServiceName() throws Exception {
+    Configuration conf = dfs.getConf();
+    URI haUri = HATestUtil.getLogicalUri(cluster);
+    AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
+    String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+    assertEquals(haService, afs.getCanonicalServiceName());
+    Token<?> token = afs.getDelegationTokens(
+        UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
+    assertEquals(haService, token.getService().toString());
+    // make sure the logical uri is handled correctly
+    token.renew(conf);
+    token.cancel(conf);
+  }
+
   enum TokenTestAction {
     RENEW, CANCEL;
   }
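
For reference, a minimal usage sketch (not part of the patch) of how a client would exercise the new code path through the AbstractFileSystem API that backs FileContext. The logical HA URI hdfs://ha-nn-uri, the class name, and the renewer are placeholder values; the Configuration is assumed to already define that nameservice and its NameNode addresses.

import java.net.URI;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class HaTokenServiceSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder logical HA URI; must match a configured nameservice.
    URI haUri = new URI("hdfs://ha-nn-uri");
    Configuration conf = new Configuration();

    // With the patch, Hdfs (the AbstractFileSystem implementation) delegates
    // to DFSClient.getCanonicalServiceName(), so the reported service name
    // reflects the logical URI rather than a resolved socket address.
    AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
    System.out.println("canonical service: " + afs.getCanonicalServiceName());

    // Tokens fetched through this API carry the same HA-compatible service,
    // because Hdfs no longer overwrites the service set by DFSClient.
    String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
    List<Token<?>> tokens = afs.getDelegationTokens(renewer);
    for (Token<?> t : tokens) {
      System.out.println("token service: " + t.getService());
    }
  }
}

With the change above, the printed service matches HAUtil.buildTokenServiceForLogicalUri(haUri), so renew and cancel operations on the token resolve the logical URI instead of a single fixed NameNode address.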