diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
new file mode 100644
index 00000000000..aa5de2cda75
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderTokenIssuer.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * File systems that support Encryption Zones have to implement this interface.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface KeyProviderTokenIssuer {
+
+  KeyProvider getKeyProvider() throws IOException;
+
+  URI getKeyProviderUri() throws IOException;
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/package-info.java
new file mode 100644
index 00000000000..d66bb441ddd
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key;
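The two files above introduce the interface the rest of this patch builds on: DistributedFileSystem and WebHdfsFileSystem implement it below, and HdfsKMSUtil consumes it. For illustration only, a minimal sketch of a third implementation; the class name StaticKeyProviderIssuer and the hardcoded KMS URI are hypothetical, not part of this patch:

    // Illustration only -- not part of this patch. The class name and the
    // hardcoded KMS URI are hypothetical.
    package org.example;

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
    import org.apache.hadoop.util.KMSUtil;

    public class StaticKeyProviderIssuer implements KeyProviderTokenIssuer {
      private final Configuration conf;

      public StaticKeyProviderIssuer(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public URI getKeyProviderUri() throws IOException {
        // A real file system derives this from credentials, server
        // defaults, or configuration, as HdfsKMSUtil does below.
        return URI.create("kms://http@kms-host:9600/kms");
      }

      @Override
      public KeyProvider getKeyProvider() throws IOException {
        // Same helper WebHdfsFileSystem.getKeyProvider() uses in this patch.
        return KMSUtil.createKeyProviderFromUri(conf, getKeyProviderUri());
      }
    }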
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 27b1b2d7261..be4de500377 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
@@ -62,11 +61,9 @@ import javax.net.SocketFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoCodec;
 import org.apache.hadoop.crypto.CryptoInputStream;
 import org.apache.hadoop.crypto.CryptoOutputStream;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
@@ -74,7 +71,6 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.CacheFlag;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -177,7 +173,6 @@ import org.apache.hadoop.ipc.RpcNoSuchMethodException;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
@@ -213,7 +208,6 @@ import com.google.common.net.InetAddresses;
 public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     DataEncryptionKeyFactory {
   public static final Logger LOG = LoggerFactory.getLogger(DFSClient.class);
-  private static final String DFS_KMS_PREFIX = "dfs-kms-";
 
   private final Configuration conf;
   private final Tracer tracer;
@@ -1009,55 +1003,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  /**
-   * Obtain the crypto protocol version from the provided FileEncryptionInfo,
-   * checking to see if this version is supported by.
-   *
-   * @param feInfo FileEncryptionInfo
-   * @return CryptoProtocolVersion from the feInfo
-   * @throws IOException if the protocol version is unsupported.
-   */
-  private static CryptoProtocolVersion getCryptoProtocolVersion(
-      FileEncryptionInfo feInfo) throws IOException {
-    final CryptoProtocolVersion version = feInfo.getCryptoProtocolVersion();
-    if (!CryptoProtocolVersion.supports(version)) {
-      throw new IOException("Client does not support specified " +
-          "CryptoProtocolVersion " + version.getDescription() + " version " +
-          "number" + version.getVersion());
-    }
-    return version;
-  }
-
-  /**
-   * Obtain a CryptoCodec based on the CipherSuite set in a FileEncryptionInfo
-   * and the available CryptoCodecs configured in the Configuration.
-   *
-   * @param conf Configuration
-   * @param feInfo FileEncryptionInfo
-   * @return CryptoCodec
-   * @throws IOException if no suitable CryptoCodec for the CipherSuite is
-   *                     available.
-   */
-  private static CryptoCodec getCryptoCodec(Configuration conf,
-      FileEncryptionInfo feInfo) throws IOException {
-    final CipherSuite suite = feInfo.getCipherSuite();
-    if (suite.equals(CipherSuite.UNKNOWN)) {
-      throw new IOException("NameNode specified unknown CipherSuite with ID "
-          + suite.getUnknownValue() + ", cannot instantiate CryptoCodec.");
-    }
-    final CryptoCodec codec = CryptoCodec.getInstance(conf, suite);
-    if (codec == null) {
-      throw new UnknownCipherSuiteException(
-          "No configuration found for the cipher suite "
-          + suite.getConfigSuffix() + " prefixed with "
-          + HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
-          + ". Please see the example configuration "
-          + "hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE "
-          + "at core-default.xml for details.");
-    }
-    return codec;
-  }
-
   /**
    * Wraps the stream in a CryptoInputStream if the underlying file is
    * encrypted.
@@ -1068,8 +1013,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     if (feInfo != null) {
       // File is encrypted, wrap the stream in a crypto stream.
       // Currently only one version, so no special logic based on the version #
-      getCryptoProtocolVersion(feInfo);
-      final CryptoCodec codec = getCryptoCodec(conf, feInfo);
+      HdfsKMSUtil.getCryptoProtocolVersion(feInfo);
+      final CryptoCodec codec = HdfsKMSUtil.getCryptoCodec(conf, feInfo);
       final KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
       final CryptoInputStream cryptoIn = new CryptoInputStream(dfsis, codec,
           decrypted.getMaterial(),
@@ -1100,8 +1045,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     if (feInfo != null) {
       // File is encrypted, wrap the stream in a crypto stream.
       // Currently only one version, so no special logic based on the version #
-      getCryptoProtocolVersion(feInfo);
-      final CryptoCodec codec = getCryptoCodec(conf, feInfo);
+      HdfsKMSUtil.getCryptoProtocolVersion(feInfo);
+      final CryptoCodec codec = HdfsKMSUtil.getCryptoCodec(conf, feInfo);
       KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
       final CryptoOutputStream cryptoOut = new CryptoOutputStream(dfsos,
           codec,
@@ -3023,51 +2968,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return HEDGED_READ_METRIC;
   }
 
-  /**
-   * Returns a key to map namenode uri to key provider uri.
-   * Tasks will lookup this key to find key Provider.
-   */
-  public Text getKeyProviderMapKey() {
-    return new Text(DFS_KMS_PREFIX + namenodeUri.getScheme()
-        +"://" + namenodeUri.getAuthority());
-  }
-
-  /**
-   * The key provider uri is searched in the following order.
-   * 1. If there is a mapping in Credential's secrets map for namenode uri.
-   * 2. From namenode getServerDefaults rpc.
-   * 3. Finally fallback to local conf.
-   * @return keyProviderUri if found from either of above 3 cases,
-   * null otherwise
-   * @throws IOException
-   */
   URI getKeyProviderUri() throws IOException {
-    URI keyProviderUri = null;
-    // Lookup the secret in credentials object for namenodeuri.
-    Credentials credentials = ugi.getCredentials();
-    byte[] keyProviderUriBytes =
-        credentials.getSecretKey(getKeyProviderMapKey());
-    if(keyProviderUriBytes != null) {
-      keyProviderUri =
-          URI.create(DFSUtilClient.bytes2String(keyProviderUriBytes));
-      return keyProviderUri;
-    }
-
-    // Query the namenode for the key provider uri.
-    FsServerDefaults serverDefaults = getServerDefaults();
-    if (serverDefaults.getKeyProviderUri() != null) {
-      if (!serverDefaults.getKeyProviderUri().isEmpty()) {
-        keyProviderUri = URI.create(serverDefaults.getKeyProviderUri());
-      }
-      return keyProviderUri;
-    }
-
-    // Last thing is to trust its own conf to be backwards compatible.
-    String keyProviderUriStr = conf.getTrimmed(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
-    if (keyProviderUriStr != null && !keyProviderUriStr.isEmpty()) {
-      keyProviderUri = URI.create(keyProviderUriStr);
-    }
-    return keyProviderUri;
+    return HdfsKMSUtil.getKeyProviderUri(ugi, namenodeUri,
+        getServerDefaults().getKeyProviderUri(), conf);
   }
 
   public KeyProvider getKeyProvider() throws IOException {
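DFSClient.getKeyProviderUri() now delegates to HdfsKMSUtil, so the per-NameNode override through the credentials secret map keeps working end to end. A sketch of how a task could pin the provider for one NameNode, mirroring what the modified tests below do; both URIs are placeholders:

    // Sketch mirroring the modified tests below; both URIs are placeholders.
    import java.net.URI;

    import org.apache.hadoop.hdfs.DFSUtilClient;
    import org.apache.hadoop.hdfs.HdfsKMSUtil;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.UserGroupInformation;

    public class PinKeyProvider {
      public static void main(String[] args) throws Exception {
        URI nnUri = URI.create("hdfs://nn-host:8020");
        Credentials credentials = new Credentials();
        // Secret key name becomes dfs-kms-hdfs://nn-host:8020.
        credentials.addSecretKey(
            HdfsKMSUtil.getKeyProviderMapKey(nnUri),
            DFSUtilClient.string2Bytes("kms://http@kms-host:9600/kms"));
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
        // DFSClient.getKeyProviderUri() for this NameNode now resolves to
        // the pinned value before getServerDefaults() or local conf.
      }
    }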
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index fe5a9da1f87..537acd25718 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -42,7 +42,6 @@ import java.util.Map;
 import javax.net.SocketFactory;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
@@ -69,7+68,6 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.KMSUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -529,30 +527,6 @@ public class DFSUtilClient {
     return new ReconfigurationProtocolTranslatorPB(addr, ticket, conf, factory);
   }
 
-  private static String keyProviderUriKeyName =
-      CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
-
-  /**
-   * Set the key provider uri configuration key name for creating key providers.
-   * @param keyName The configuration key name.
-   */
-  public static void setKeyProviderUriKeyName(final String keyName) {
-    keyProviderUriKeyName = keyName;
-  }
-
-  /**
-   * Creates a new KeyProvider from the given Configuration.
-   *
-   * @param conf Configuration
-   * @return new KeyProvider, or null if no provider was found.
-   * @throws IOException if the KeyProvider is improperly specified in
-   *                     the Configuration
-   */
-  public static KeyProvider createKeyProvider(
-      final Configuration conf) throws IOException {
-    return KMSUtil.createKeyProvider(conf, keyProviderUriKeyName);
-  }
-
   public static Peer peerFromSocket(Socket socket)
       throws IOException {
     Peer peer;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index e33858a2832..497f770556a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -98,7 +98,8 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -113,7 +114,8 @@ import javax.annotation.Nonnull;
  *****************************************************************/
 @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase" })
 @InterfaceStability.Unstable
-public class DistributedFileSystem extends FileSystem {
+public class DistributedFileSystem extends FileSystem
+    implements KeyProviderTokenIssuer {
   private Path workingDir;
   private URI uri;
   private String homeDirPrefix =
@@ -2521,29 +2523,22 @@ public class DistributedFileSystem extends FileSystem {
     }.resolve(this, absF);
   }
 
+  @Override
+  public URI getKeyProviderUri() throws IOException {
+    return dfs.getKeyProviderUri();
+  }
+
+  @Override
+  public KeyProvider getKeyProvider() throws IOException {
+    return dfs.getKeyProvider();
+  }
+
   @Override
   public Token<?>[] addDelegationTokens(
       final String renewer, Credentials credentials) throws IOException {
     Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
-    URI keyProviderUri = dfs.getKeyProviderUri();
-    if (keyProviderUri != null) {
-      KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
-          KeyProviderDelegationTokenExtension.
-              createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
-      Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
-          addDelegationTokens(renewer, credentials);
-      credentials.addSecretKey(dfs.getKeyProviderMapKey(),
-          DFSUtilClient.string2Bytes(keyProviderUri.toString()));
-      if (tokens != null && kpTokens != null) {
-        Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
-        System.arraycopy(tokens, 0, all, 0, tokens.length);
-        System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
-        tokens = all;
-      } else {
-        tokens = (tokens != null) ? tokens : kpTokens;
-      }
-    }
-    return tokens;
+    return HdfsKMSUtil.addDelegationTokensForKeyProvider(
+        this, renewer, credentials, uri, tokens);
   }
 
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
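With the refactored addDelegationTokens above, one call on a DistributedFileSystem gathers the HDFS delegation token and, when the NameNode advertises a key provider, the KMS token as well. A usage sketch, assuming a secured cluster; the NameNode address and renewer are placeholders:

    // Usage sketch; assumes a secured cluster, placeholder NameNode address.
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    public class CollectTokens {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://nn-host:8020"), conf);
        Credentials creds = new Credentials();
        // One call now returns both the HDFS token and, if the NameNode
        // advertises a key provider, the kms-dt token.
        Token<?>[] tokens = fs.addDelegationTokens("yarn", creds);
        for (Token<?> token : tokens) {
          System.out.println(token.getKind() + " for " + token.getService());
        }
      }
    }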
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
new file mode 100644
index 00000000000..00f4fb2eae3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsKMSUtil.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
+import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.KMSUtil;
+
+/**
+ * Utility class for key provider related methods in the hdfs client package.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class HdfsKMSUtil {
+  private HdfsKMSUtil() { /* Hidden constructor */ }
+
+  private static final String DFS_KMS_PREFIX = "dfs-kms-";
+  private static String keyProviderUriKeyName =
+      CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+
+  /**
+   * Creates a new KeyProvider from the given Configuration.
+   *
+   * @param conf Configuration
+   * @return new KeyProvider, or null if no provider was found.
+   * @throws IOException if the KeyProvider is improperly specified in
+   *                     the Configuration
+   */
+  public static KeyProvider createKeyProvider(
+      final Configuration conf) throws IOException {
+    return KMSUtil.createKeyProvider(conf, keyProviderUriKeyName);
+  }
+
+  public static Token<?>[] addDelegationTokensForKeyProvider(
+      KeyProviderTokenIssuer kpTokenIssuer, final String renewer,
+      Credentials credentials, URI namenodeUri, Token<?>[] tokens)
+      throws IOException {
+    KeyProvider keyProvider = kpTokenIssuer.getKeyProvider();
+    if (keyProvider != null) {
+      KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension
+          = KeyProviderDelegationTokenExtension.
+              createKeyProviderDelegationTokenExtension(keyProvider);
+      Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
+          addDelegationTokens(renewer, credentials);
+      credentials.addSecretKey(getKeyProviderMapKey(namenodeUri),
+          DFSUtilClient.string2Bytes(
+              kpTokenIssuer.getKeyProviderUri().toString()));
+      if (tokens != null && kpTokens != null) {
+        Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
+        System.arraycopy(tokens, 0, all, 0, tokens.length);
+        System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
+        tokens = all;
+      } else {
+        tokens = (tokens != null) ? tokens : kpTokens;
+      }
+    }
+    return tokens;
+  }
+
+  /**
+   * Obtain the crypto protocol version from the provided FileEncryptionInfo,
+   * checking to see if this version is supported by the client.
+   *
+   * @param feInfo FileEncryptionInfo
+   * @return CryptoProtocolVersion from the feInfo
+   * @throws IOException if the protocol version is unsupported.
+   */
+  public static CryptoProtocolVersion getCryptoProtocolVersion(
+      FileEncryptionInfo feInfo) throws IOException {
+    final CryptoProtocolVersion version = feInfo.getCryptoProtocolVersion();
+    if (!CryptoProtocolVersion.supports(version)) {
+      throw new IOException("Client does not support specified " +
+          "CryptoProtocolVersion " + version.getDescription() + " version " +
+          "number" + version.getVersion());
+    }
+    return version;
+  }
+
+  /**
+   * Obtain a CryptoCodec based on the CipherSuite set in a FileEncryptionInfo
+   * and the available CryptoCodecs configured in the Configuration.
+   *
+   * @param conf Configuration
+   * @param feInfo FileEncryptionInfo
+   * @return CryptoCodec
+   * @throws IOException if no suitable CryptoCodec for the CipherSuite is
+   *                     available.
+   */
+  public static CryptoCodec getCryptoCodec(Configuration conf,
+      FileEncryptionInfo feInfo) throws IOException {
+    final CipherSuite suite = feInfo.getCipherSuite();
+    if (suite.equals(CipherSuite.UNKNOWN)) {
+      throw new IOException("NameNode specified unknown CipherSuite with ID "
+          + suite.getUnknownValue() + ", cannot instantiate CryptoCodec.");
+    }
+    final CryptoCodec codec = CryptoCodec.getInstance(conf, suite);
+    if (codec == null) {
+      throw new UnknownCipherSuiteException(
+          "No configuration found for the cipher suite "
+          + suite.getConfigSuffix() + " prefixed with "
+          + HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+          + ". Please see the example configuration "
+          + "hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE "
+          + "at core-default.xml for details.");
+    }
+    return codec;
+  }
+
+  /**
+   * The key provider uri is searched in the following order.
+   * 1. If there is a mapping in Credential's secrets map for namenode uri.
+   * 2. From namenode getServerDefaults call.
+   * 3. Finally fallback to local conf.
+   * @return keyProviderUri if found from any of the above 3 cases,
+   * null otherwise
+   * @throws IOException
+   */
+  public static URI getKeyProviderUri(UserGroupInformation ugi,
+      URI namenodeUri, String keyProviderUriStr, Configuration conf)
+      throws IOException {
+    URI keyProviderUri = null;
+    // Lookup the secret in credentials object for namenodeuri.
+    Credentials credentials = ugi.getCredentials();
+    byte[] keyProviderUriBytes =
+        credentials.getSecretKey(getKeyProviderMapKey(namenodeUri));
+    if (keyProviderUriBytes != null) {
+      keyProviderUri =
+          URI.create(DFSUtilClient.bytes2String(keyProviderUriBytes));
+      return keyProviderUri;
+    }
+
+    if (keyProviderUriStr != null) {
+      if (!keyProviderUriStr.isEmpty()) {
+        keyProviderUri = URI.create(keyProviderUriStr);
+      }
+      return keyProviderUri;
+    }
+
+    // Last thing is to trust its own conf to be backwards compatible.
+    String keyProviderUriFromConf = conf.getTrimmed(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
+    if (keyProviderUriFromConf != null && !keyProviderUriFromConf.isEmpty()) {
+      keyProviderUri = URI.create(keyProviderUriFromConf);
+    }
+    return keyProviderUri;
+  }
+
+  /**
+   * Returns a key to map namenode uri to key provider uri.
+   * Tasks will lookup this key to find key Provider.
+   */
+  public static Text getKeyProviderMapKey(URI namenodeUri) {
+    return new Text(DFS_KMS_PREFIX + namenodeUri.getScheme()
+        +"://" + namenodeUri.getAuthority());
+  }
+}
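getKeyProviderUri() above resolves the provider in three steps: the credentials secret map, then the server-supplied value, then local configuration. A sketch of the fall-through to configuration, with no credentials entry and a null server value; all hosts and values are placeholders:

    // Sketch of the conf fall-through; all values are placeholders.
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.hdfs.HdfsKMSUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ResolveProviderUri {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(
            CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
            "kms://http@kms-host:9600/kms");
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        URI nnUri = URI.create("hdfs://nn-host:8020");
        // No dfs-kms-* secret in the ugi credentials (case 1) and a null
        // server-supplied value (case 2), so the local conf wins (case 3).
        URI provider = HdfsKMSUtil.getKeyProviderUri(ugi, nnUri, null, conf);
        System.out.println(provider); // kms://http@kms-host:9600/kms
      }
    }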
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 3f9839a142d..0ed8e853775 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -54,6 +54,8 @@ import javax.ws.rs.core.MediaType;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.ContentSummary;
@@ -81,6 +83,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
+import org.apache.hadoop.hdfs.HdfsKMSUtil;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -97,6 +100,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -104,6 +108,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
+import org.apache.hadoop.util.KMSUtil;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -119,7 +124,7 @@ import com.google.common.collect.Lists;
 /** A FileSystem for HDFS over the web. */
 public class WebHdfsFileSystem extends FileSystem
     implements DelegationTokenRenewer.Renewable,
-    TokenAspect.TokenManagementDelegator {
+    TokenAspect.TokenManagementDelegator, KeyProviderTokenIssuer {
   public static final Logger LOG = LoggerFactory
       .getLogger(WebHdfsFileSystem.class);
   /** WebHdfs version. */
@@ -1621,6 +1626,13 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   @Override
+  public Token<?>[] addDelegationTokens(String renewer,
+      Credentials credentials) throws IOException {
+    Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
+    return HdfsKMSUtil.addDelegationTokensForKeyProvider(this, renewer,
+        credentials, getUri(), tokens);
+  }
+
   public BlockLocation[] getFileBlockLocations(final FileStatus status,
       final long offset, final long length) throws IOException {
     if (status == null) {
@@ -1864,6 +1876,29 @@ public class WebHdfsFileSystem extends FileSystem
     this.retryPolicy = rp;
   }
 
+  @Override
+  public URI getKeyProviderUri() throws IOException {
+    String keyProviderUri = null;
+    try {
+      keyProviderUri = getServerDefaults().getKeyProviderUri();
+    } catch (UnsupportedOperationException e) {
+      // This means the server doesn't support the GETSERVERDEFAULTS call.
+      // Do nothing, let keyProviderUri = null.
+    }
+    return HdfsKMSUtil.getKeyProviderUri(ugi, getUri(), keyProviderUri,
+        getConf());
+  }
+
+  @Override
+  public KeyProvider getKeyProvider() throws IOException {
+    URI keyProviderUri = getKeyProviderUri();
+    if (keyProviderUri == null) {
+      return null;
+    }
+    return KMSUtil.createKeyProviderFromUri(getConf(), keyProviderUri);
+  }
+
   /**
    * This class is used for opening, reading, and seeking files while using the
    * WebHdfsFileSystem. This class will invoke the retry policy when performing
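The UnsupportedOperationException catch in getKeyProviderUri() above is what keeps webhdfs usable against older NameNodes that lack the GETSERVERDEFAULTS operation: the server value is treated as absent and resolution continues with credentials and local configuration. A usage sketch; the webhdfs address is a placeholder:

    // Usage sketch; the webhdfs address is a placeholder.
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

    public class WebHdfsKeyProvider {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        WebHdfsFileSystem webfs = (WebHdfsFileSystem) FileSystem.get(
            URI.create("webhdfs://nn-host:50070"), conf);
        // Null when no provider is found through credentials, server
        // defaults, or conf; otherwise a provider built via KMSUtil.
        KeyProvider provider = webfs.getKeyProvider();
        System.out.println(provider == null
            ? "no key provider" : "provider at " + webfs.getKeyProviderUri());
      }
    }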
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index d4fd5f73a82..f863037191e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1634,7 +1634,7 @@ public class DFSUtil {
    */
   public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
       final Configuration conf) throws IOException {
-    KeyProvider keyProvider = DFSUtilClient.createKeyProvider(conf);
+    KeyProvider keyProvider = HdfsKMSUtil.createKeyProvider(conf);
     if (keyProvider == null) {
       return null;
     }
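DFSUtil.createKeyProviderCryptoExtension keeps its contract of returning null when no provider is configured; only the helper it calls has moved. A minimal sketch of the call, with a placeholder provider URI:

    // Minimal sketch; the provider URI is a placeholder.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class CreateCryptoExtension {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(
            CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
            "kms://http@kms-host:9600/kms");
        // Null when the key is unset, mirroring HdfsKMSUtil.createKeyProvider.
        KeyProviderCryptoExtension kpce =
            DFSUtil.createKeyProviderCryptoExtension(conf);
        System.out.println(kpce == null ? "no provider" : kpce.toString());
      }
    }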
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 1dda8b3dc35..b8615b44a76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -1717,7 +1717,8 @@ public class TestEncryptionZones {
     Credentials credentials = new Credentials();
     // Key provider uri should be in the secret map of credentials object with
     // namenode uri as key
-    Text lookUpKey = client.getKeyProviderMapKey();
+    Text lookUpKey = HdfsKMSUtil.getKeyProviderMapKey(
+        cluster.getFileSystem().getUri());
     credentials.addSecretKey(lookUpKey,
         DFSUtilClient.string2Bytes(dummyKeyProvider));
     client.ugi.addCredentials(credentials);
@@ -1868,7 +1869,8 @@ public class TestEncryptionZones {
         CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
     DFSClient client = cluster.getFileSystem().getClient();
     Credentials credentials = new Credentials();
-    Text lookUpKey = client.getKeyProviderMapKey();
+    Text lookUpKey = HdfsKMSUtil.
+        getKeyProviderMapKey(cluster.getFileSystem().getUri());
     credentials.addSecretKey(lookUpKey,
         DFSUtilClient.string2Bytes(getKeyProviderURI()));
     client.ugi.addCredentials(credentials);
@@ -1932,4 +1934,38 @@ public class TestEncryptionZones {
         dfsAdmin.listEncryptionZones().hasNext());
   }
 
+  /**
+   * This test returns a mocked kms token when
+   * {@link WebHdfsFileSystem#addDelegationTokens(String, Credentials)}
+   * is called.
+   * @throws Exception
+   */
+  @Test
+  public void addMockKmsToken() throws Exception {
+    UserGroupInformation.createRemoteUser("JobTracker");
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsConstants.WEBHDFS_SCHEME);
+    KeyProvider keyProvider = Mockito.mock(KeyProvider.class, withSettings()
+        .extraInterfaces(DelegationTokenExtension.class,
+            CryptoExtension.class));
+    Mockito.when(keyProvider.getConf()).thenReturn(conf);
+    byte[] testIdentifier = "Test identifier for delegation token".getBytes();
+
+    Token testToken = new Token(testIdentifier, new byte[0],
+        new Text("kms-dt"), new Text());
+    Mockito.when(((DelegationTokenExtension) keyProvider)
+        .addDelegationTokens(anyString(), (Credentials) any()))
+        .thenReturn(new Token[] {testToken});
+
+    WebHdfsFileSystem webfsSpy = Mockito.spy(webfs);
+    Mockito.doReturn(keyProvider).when(webfsSpy).getKeyProvider();
+
+    Credentials creds = new Credentials();
+    final Token<?>[] tokens =
+        webfsSpy.addDelegationTokens("JobTracker", creds);
+
+    Assert.assertEquals(2, tokens.length);
+    Assert.assertEquals(tokens[1], testToken);
+    Assert.assertEquals(1, creds.numberOfTokens());
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
index 6f533625cc7..16ab073e33b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertTrue;
 
 import com.google.common.base.Supplier;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
 import org.apache.hadoop.security.Credentials;
@@ -28,6 +29,9 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -134,4 +138,23 @@ public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
       }
     }, 1000, 60000);
   }
+
+  /**
+   * This test fetches the kms delegation token
+   * for {@link WebHdfsFileSystem}.
+   * @throws Exception
+   */
+  @Test
+  public void addDelegationTokenFromWebhdfsFileSystem() throws Exception {
+    UserGroupInformation.createRemoteUser("JobTracker");
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+        conf, WebHdfsConstants.WEBHDFS_SCHEME);
+    Credentials creds = new Credentials();
+    final Token<?>[] tokens = webfs.addDelegationTokens("JobTracker", creds);
+
+    Assert.assertEquals(2, tokens.length);
+    Assert.assertEquals(KMSDelegationToken.TOKEN_KIND_STR,
+        tokens[1].getKind().toString());
+    Assert.assertEquals(2, creds.numberOfTokens());
+  }
 }