HDFS-6667. Merge change r1611508 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1611510 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-07-17 23:16:48 +00:00
parent 136aa73c78
commit c02ea4b655
7 changed files with 40 additions and 21 deletions
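What the patch does, as the diffs below show: HA delegation-token service names were built with the fixed prefix "ha-hdfs:", so a WebHDFS token and an RPC token for the same logical cluster carried identical service names, and webhdfs token selection on a secure cluster failed with "Client cannot authenticate via:[TOKEN, KERBEROS]". The fix makes the prefix scheme-aware ("ha-" + scheme + ":") through a new HAUtil.buildTokenServicePrefixForLogicalUri(String) and threads the FileSystem scheme through every caller of buildTokenServiceForLogicalUri. A minimal sketch of the new naming, using plain Strings in place of Hadoop's Text type and a made-up nameservice "mycluster":

// Sketch only: mirrors the patched HAUtil logic with plain Strings
// (the real methods return org.apache.hadoop.io.Text).
public class TokenServiceNamingSketch {
  static final String HA_DT_SERVICE_PREFIX = "ha-"; // new value in HdfsConstants

  // mirrors HAUtil.buildTokenServicePrefixForLogicalUri(String)
  static String prefixFor(String scheme) {
    return HA_DT_SERVICE_PREFIX + scheme + ":";
  }

  // mirrors HAUtil.buildTokenServiceForLogicalUri(URI, String)
  static String serviceFor(java.net.URI uri, String scheme) {
    return prefixFor(scheme) + uri.getHost();
  }

  public static void main(String[] args) {
    java.net.URI logical = java.net.URI.create("hdfs://mycluster");
    System.out.println(serviceFor(logical, "hdfs"));    // ha-hdfs:mycluster (same as before)
    System.out.println(serviceFor(logical, "webhdfs")); // ha-webhdfs:mycluster (new, distinct)
  }
}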

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -74,6 +74,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6693. TestDFSAdminWithHA fails on windows ( vinayakumarb )
 
+    HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
+    with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java

@@ -37,13 +37,13 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -257,12 +257,11 @@ public class HAUtil {
   /**
    * Parse the file system URI out of the provided token.
    */
-  public static URI getServiceUriFromToken(final String scheme,
-      Token<?> token) {
+  public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
     String tokStr = token.getService().toString();
-    if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) {
-      tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, "");
+    final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
+    if (tokStr.startsWith(prefix)) {
+      tokStr = tokStr.replaceFirst(prefix, "");
     }
     return URI.create(scheme + "://" + tokStr);
   }
 
@@ -271,10 +270,13 @@ public class HAUtil {
 
   /**
    * Get the service name used in the delegation token for the given logical
    * HA service.
    * @param uri the logical URI of the cluster
+   * @param scheme the scheme of the corresponding FileSystem
    * @return the service name
    */
-  public static Text buildTokenServiceForLogicalUri(URI uri) {
-    return new Text(HA_DT_SERVICE_PREFIX + uri.getHost());
+  public static Text buildTokenServiceForLogicalUri(final URI uri,
+      final String scheme) {
+    return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+        + uri.getHost());
   }
@@ -285,6 +287,10 @@ public class HAUtil {
     return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
   }
 
+  public static String buildTokenServicePrefixForLogicalUri(String scheme) {
+    return HA_DT_SERVICE_PREFIX + scheme + ":";
+  }
+
   /**
    * Locate a delegation token associated with the given HA cluster URI, and if
    * one is found, clone it to also represent the underlying namenode address.
@@ -296,7 +302,9 @@ public class HAUtil {
   public static void cloneDelegationTokenForLogicalUri(
       UserGroupInformation ugi, URI haUri,
       Collection<InetSocketAddress> nnAddrs) {
-    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
+    // this cloning logic is only used by hdfs
+    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME);
     Token<DelegationTokenIdentifier> haToken =
         tokenSelector.selectToken(haService, ugi.getTokens());
     if (haToken != null) {
@@ -307,8 +315,9 @@ public class HAUtil {
       Token<DelegationTokenIdentifier> specificToken =
           new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
       SecurityUtil.setTokenService(specificToken, singleNNAddr);
-      Text alias =
-          new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService());
+      Text alias = new Text(
+          buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+          + "//" + specificToken.getService());
       ugi.addToken(alias, specificToken);
       LOG.debug("Mapped HA service delegation token for logical URI " +
           haUri + " to namenode " + singleNNAddr);

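One subtlety in the cloneDelegationTokenForLogicalUri hunk above: the per-namenode alias keeps the HDFS scheme hard-coded, which the new inline comment justifies because that cloning path is only used by hdfs. A sketch of the alias string that results, with a hypothetical namenode address nn1.example.com:8020:

public class AliasSketch {
  public static void main(String[] args) {
    // Hypothetical value: SecurityUtil.setTokenService normally writes the
    // namenode's host:port into the cloned token's service field.
    String specificTokenService = "nn1.example.com:8020";
    // buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
    // evaluates to "ha-" + "hdfs" + ":" == "ha-hdfs:".
    String alias = "ha-hdfs:" + "//" + specificTokenService;
    System.out.println(alias); // prints ha-hdfs://nn1.example.com:8020
  }
}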
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -163,7 +163,8 @@ public class NameNodeProxies {
     Text dtService;
     if (failoverProxyProvider.useLogicalURI()) {
-      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+          HdfsConstants.HDFS_URI_SCHEME);
     } else {
       dtService = SecurityUtil.buildTokenService(
           NameNode.getAddress(nameNodeUri));
     }
@@ -224,7 +225,8 @@ public class NameNodeProxies {
         new Class[] { xface }, dummyHandler);
     Text dtService;
     if (failoverProxyProvider.useLogicalURI()) {
-      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+          HdfsConstants.HDFS_URI_SCHEME);
     } else {
       dtService = SecurityUtil.buildTokenService(
           NameNode.getAddress(nameNodeUri));

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

@@ -126,7 +126,7 @@ public class HdfsConstants {
    * of a delgation token, indicating that the URI is a logical (HA)
    * URI.
    */
-  public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:";
+  public static final String HA_DT_SERVICE_PREFIX = "ha-";
 
   /**
    * Path components that are reserved in HDFS.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java

@@ -128,7 +128,8 @@ public class DatanodeWebHdfsMethods {
           "://" + nnId);
       boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
       if (isLogical) {
-        token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri));
+        token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
+            HdfsConstants.HDFS_URI_SCHEME));
       } else {
         token.setService(SecurityUtil.buildTokenService(nnUri));
       }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -157,7 +157,7 @@ public class WebHdfsFileSystem extends FileSystem
     // getCanonicalUri() in order to handle the case where no port is
     // specified in the URI
     this.tokenServiceName = isLogicalUri ?
-        HAUtil.buildTokenServiceForLogicalUri(uri)
+        HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
         : SecurityUtil.buildTokenService(getCanonicalUri());
 
     if (!isHA) {

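The WebHdfsFileSystem hunk above is the user-visible end of the fix: the filesystem now passes its own getScheme() rather than inheriting the hdfs-only prefix, so its token service follows its scheme. A round-trip sketch, assuming the scheme string "webhdfs" and a made-up nameservice "mycluster", showing that getServiceUriFromToken strips exactly the prefix buildTokenServiceForLogicalUri prepends:

public class RoundTripSketch {
  // Mirrors HAUtil.buildTokenServicePrefixForLogicalUri(String).
  static String prefixFor(String scheme) {
    return "ha-" + scheme + ":";
  }

  public static void main(String[] args) {
    String scheme = "webhdfs";                         // WebHdfsFileSystem.getScheme()
    String service = prefixFor(scheme) + "mycluster";  // what the token's service carries
    // Parse it back the way HAUtil.getServiceUriFromToken now does:
    String prefix = prefixFor(scheme);
    String tokStr = service.startsWith(prefix)
        ? service.replaceFirst(prefix, "")
        : service;
    System.out.println(java.net.URI.create(scheme + "://" + tokStr)); // webhdfs://mycluster
  }
}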
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -299,7 +300,8 @@ public class TestDelegationTokensWithHA {
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
 
     URI haUri = new URI("hdfs://my-ha-uri/");
-    token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
+    token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME));
     ugi.addToken(token);
 
     Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
@@ -355,7 +357,8 @@ public class TestDelegationTokensWithHA {
   @Test
   public void testDFSGetCanonicalServiceName() throws Exception {
     URI hAUri = HATestUtil.getLogicalUri(cluster);
-    String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
+    String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
+        HdfsConstants.HDFS_URI_SCHEME).toString();
     assertEquals(haService, dfs.getCanonicalServiceName());
     final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
     final Token<DelegationTokenIdentifier> token =
@@ -371,7 +374,8 @@ public class TestDelegationTokensWithHA {
     Configuration conf = dfs.getConf();
     URI haUri = HATestUtil.getLogicalUri(cluster);
     AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
-    String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+    String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME).toString();
     assertEquals(haService, afs.getCanonicalServiceName());
     Token<?> token = afs.getDelegationTokens(
         UserGroupInformation.getCurrentUser().getShortUserName()).get(0);