From 0e7d1dbf9ab732dd04dccaacbf273e9ac437eba5 Mon Sep 17 00:00:00 2001 From: junping_du Date: Tue, 16 Sep 2014 18:25:45 -0700 Subject: [PATCH 01/10] YARN-1250. Addendum --- hadoop-yarn-project/CHANGES.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ec59cba0f5d..51fe3cce108 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -88,6 +88,9 @@ Release 2.6.0 - UNRELEASED and enforce/not-enforce strict control of per-container cpu usage. (Varun Vasudev via vinodkv) + YARN-1250. Generic history service should support application-acls. (Zhijie Shen + via junping_du) + IMPROVEMENTS YARN-2197. Add a link to YARN CHANGES.txt in the left side of doc From e14e71d5feff961b681d828b00e6f12cb197ebf5 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 16 Sep 2014 14:32:49 -0700 Subject: [PATCH 02/10] HADOOP-11096. KMS: KeyAuthorizationKeyProvider should verify the keyversion belongs to the keyname on decrypt. (tucu) --- .../hadoop-common/CHANGES.txt | 3 ++ .../key/KeyProviderCryptoExtension.java | 8 +-- .../key/TestKeyProviderCryptoExtension.java | 2 +- .../server/KeyAuthorizationKeyProvider.java | 12 +++++ .../TestKeyAuthorizationKeyProvider.java | 53 +++++++++++++++++++ .../org/apache/hadoop/hdfs/DFSClient.java | 3 +- 6 files changed, 76 insertions(+), 5 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3bf9d4baf5c..9324acda2ff 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -815,6 +815,9 @@ Release 2.6.0 - UNRELEASED HADOOP-11088. Unittest TestKeyShell, TestCredShell and TestKMS assume UNIX path separator for JECKS key store path. (Xiaoyu Yao via cnauroth) + HADOOP-11096. KMS: KeyAuthorizationKeyProvider should verify the keyversion + belongs to the keyname on decrypt. (tucu) + Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java index fed7e9e4d97..968e341338b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java @@ -91,6 +91,8 @@ public class KeyProviderCryptoExtension extends * returned EncryptedKeyVersion will only partially be populated; it is not * necessarily suitable for operations besides decryption. * + * @param keyName Key name of the encryption key use to encrypt the + * encrypted key. * @param encryptionKeyVersionName Version name of the encryption key used * to encrypt the encrypted key. * @param encryptedKeyIv Initialization vector of the encrypted @@ -100,12 +102,12 @@ public class KeyProviderCryptoExtension extends * @param encryptedKeyMaterial Key material of the encrypted key. * @return EncryptedKeyVersion suitable for decryption. 
*/ - public static EncryptedKeyVersion createForDecryption(String - encryptionKeyVersionName, byte[] encryptedKeyIv, + public static EncryptedKeyVersion createForDecryption(String keyName, + String encryptionKeyVersionName, byte[] encryptedKeyIv, byte[] encryptedKeyMaterial) { KeyVersion encryptedKeyVersion = new KeyVersion(null, EEK, encryptedKeyMaterial); - return new EncryptedKeyVersion(null, encryptionKeyVersionName, + return new EncryptedKeyVersion(keyName, encryptionKeyVersionName, encryptedKeyIv, encryptedKeyVersion); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java index 70ec6feaf10..62e3310173d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java @@ -121,7 +121,7 @@ public class TestKeyProviderCryptoExtension { // Test the createForDecryption factory method EncryptedKeyVersion eek2 = - EncryptedKeyVersion.createForDecryption( + EncryptedKeyVersion.createForDecryption(eek.getEncryptionKeyName(), eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(), eek.getEncryptedKeyVersion().getMaterial()); diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java index fe908e38c94..bccec4aeee5 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java @@ -192,9 +192,21 @@ public class KeyAuthorizationKeyProvider extends KeyProviderCryptoExtension { return provider.generateEncryptedKey(encryptionKeyName); } + private void verifyKeyVersionBelongsToKey(EncryptedKeyVersion ekv) + throws IOException { + String kn = ekv.getEncryptionKeyName(); + String kvn = ekv.getEncryptionKeyVersionName(); + KeyVersion kv = provider.getKeyVersion(kvn); + if (!kv.getName().equals(kn)) { + throw new IllegalArgumentException(String.format( + "KeyVersion '%s' does not belong to the key '%s'", kvn, kn)); + } + } + @Override public KeyVersion decryptEncryptedKey(EncryptedKeyVersion encryptedKeyVersion) throws IOException, GeneralSecurityException { + verifyKeyVersionBelongsToKey(encryptedKeyVersion); doAccessCheck( encryptedKeyVersion.getEncryptionKeyName(), KeyOpType.DECRYPT_EEK); return provider.decryptEncryptedKey(encryptedKeyVersion); diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java index a79926a9cd3..1db3d706883 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java @@ -215,4 +215,57 @@ public class TestKeyAuthorizationKeyProvider { return options; } + + @Test(expected = IllegalArgumentException.class) + public void 
testDecryptWithKeyVersionNameKeyMismatch() throws Exception { + final Configuration conf = new Configuration(); + KeyProvider kp = + new UserProvider.Factory().createProvider(new URI("user:///"), conf); + KeyACLs mock = mock(KeyACLs.class); + when(mock.isACLPresent("testKey", KeyOpType.MANAGEMENT)).thenReturn(true); + when(mock.isACLPresent("testKey", KeyOpType.GENERATE_EEK)).thenReturn(true); + when(mock.isACLPresent("testKey", KeyOpType.DECRYPT_EEK)).thenReturn(true); + when(mock.isACLPresent("testKey", KeyOpType.ALL)).thenReturn(true); + UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1"); + UserGroupInformation u2 = UserGroupInformation.createRemoteUser("u2"); + UserGroupInformation u3 = UserGroupInformation.createRemoteUser("u3"); + UserGroupInformation sudo = UserGroupInformation.createRemoteUser("sudo"); + when(mock.hasAccessToKey("testKey", u1, + KeyOpType.MANAGEMENT)).thenReturn(true); + when(mock.hasAccessToKey("testKey", u2, + KeyOpType.GENERATE_EEK)).thenReturn(true); + when(mock.hasAccessToKey("testKey", u3, + KeyOpType.DECRYPT_EEK)).thenReturn(true); + when(mock.hasAccessToKey("testKey", sudo, + KeyOpType.ALL)).thenReturn(true); + final KeyProviderCryptoExtension kpExt = + new KeyAuthorizationKeyProvider( + KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp), + mock); + + sudo.doAs( + new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + Options opt = newOptions(conf); + Map m = new HashMap(); + m.put("key.acl.name", "testKey"); + opt.setAttributes(m); + KeyVersion kv = + kpExt.createKey("foo", SecureRandom.getSeed(16), opt); + kpExt.rollNewVersion(kv.getName()); + kpExt.rollNewVersion(kv.getName(), SecureRandom.getSeed(16)); + EncryptedKeyVersion ekv = kpExt.generateEncryptedKey(kv.getName()); + ekv = EncryptedKeyVersion.createForDecryption( + ekv.getEncryptionKeyName() + "x", + ekv.getEncryptionKeyVersionName(), + ekv.getEncryptedKeyIv(), + ekv.getEncryptedKeyVersion().getMaterial()); + kpExt.decryptEncryptedKey(ekv); + return null; + } + } + ); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 9da8efc7480..456fac63425 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1321,7 +1321,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, " an encrypted file"); } EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption( - feInfo.getEzKeyVersionName(), feInfo.getIV(), + //TODO: here we have to put the keyName to be provided by HDFS-6987 + null, feInfo.getEzKeyVersionName(), feInfo.getIV(), feInfo.getEncryptedDataEncryptionKey()); try { return provider.decryptEncryptedKey(ekv); From 8cf1052beb7cab68be1a6319c0a4d7e1c790d58a Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 16 Sep 2014 21:47:55 -0700 Subject: [PATCH 03/10] HADOOP-11097. kms docs say proxyusers, not proxyuser for config params. 
(clamb via tucu) --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-kms/src/site/apt/index.apt.vm | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 9324acda2ff..11151f0223c 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -818,6 +818,9 @@ Release 2.6.0 - UNRELEASED HADOOP-11096. KMS: KeyAuthorizationKeyProvider should verify the keyversion belongs to the keyname on decrypt. (tucu) + HADOOP-11097. kms docs say proxyusers, not proxyuser for config params. + (clamb via tucu) + Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index c76ca3b30ad..d70f2a6d62e 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -197,22 +197,22 @@ hadoop-${project.version} $ sbin/kms.sh start *** KMS Proxyuser Configuration - Each proxyusers must be configured in <<>> using the + Each proxyuser must be configured in <<>> using the following properties: +---+ - hadoop.kms.proxyusers.#USER#.users + hadoop.kms.proxyuser.#USER#.users * - hadoop.kms.proxyusers.#USER#.groups + hadoop.kms.proxyuser.#USER#.groups * - hadoop.kms.proxyusers.#USER#.hosts + hadoop.kms.proxyuser.#USER#.hosts * +---+ From c0c7e6fabd573df85791d7ec4c536fd48280883f Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 16 Sep 2014 23:36:10 -0700 Subject: [PATCH 04/10] HADOOP-11062. CryptoCodec testcases requiring OpenSSL should be run only if -Pnative is used. (asuresh via tucu) --- .../hadoop-common/CHANGES.txt | 3 +++ hadoop-common-project/hadoop-common/pom.xml | 3 +++ .../apache/hadoop/crypto/TestCryptoCodec.java | 18 ++++++++++++++++-- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 7 +++++++ 4 files changed, 29 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 11151f0223c..f0fcab5580f 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -821,6 +821,9 @@ Release 2.6.0 - UNRELEASED HADOOP-11097. kms docs say proxyusers, not proxyuser for config params. (clamb via tucu) + HADOOP-11062. CryptoCodec testcases requiring OpenSSL should be run + only if -Pnative is used. 
(asuresh via tucu) + Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index ae495be0e65..0183e292c8a 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -375,6 +375,7 @@ ${startKdc} ${kdc.resource.dir} + ${runningWithNative} @@ -507,6 +508,7 @@ false + true @@ -626,6 +628,7 @@ false + true true diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java index 298f4ef8b0f..79987cec37c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java @@ -59,7 +59,14 @@ public class TestCryptoCodec { @Test(timeout=120000) public void testJceAesCtrCryptoCodec() throws Exception { - Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); + if (!"true".equalsIgnoreCase(System.getProperty("runningWithNative"))) { + LOG.warn("Skipping since test was not run with -Pnative flag"); + Assume.assumeTrue(false); + } + if (!NativeCodeLoader.buildSupportsOpenssl()) { + LOG.warn("Skipping test since openSSL library not loaded"); + Assume.assumeTrue(false); + } Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason()); cryptoCodecTest(conf, seed, 0, jceCodecClass, jceCodecClass); cryptoCodecTest(conf, seed, count, jceCodecClass, jceCodecClass); @@ -68,7 +75,14 @@ public class TestCryptoCodec { @Test(timeout=120000) public void testOpensslAesCtrCryptoCodec() throws Exception { - Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); + if (!"true".equalsIgnoreCase(System.getProperty("runningWithNative"))) { + LOG.warn("Skipping since test was not run with -Pnative flag"); + Assume.assumeTrue(false); + } + if (!NativeCodeLoader.buildSupportsOpenssl()) { + LOG.warn("Skipping test since openSSL library not loaded"); + Assume.assumeTrue(false); + } Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason()); cryptoCodecTest(conf, seed, 0, opensslCodecClass, opensslCodecClass); cryptoCodecTest(conf, seed, count, opensslCodecClass, opensslCodecClass); diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index b1707fef000..dade77d7347 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -209,6 +209,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${startKdc} ${kdc.resource.dir} + ${runningWithNative} @@ -386,6 +387,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> windows + + true + @@ -476,6 +480,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> false + + true + From e4ddb6da15420d5c13ec7ec99fed1e44b32290b0 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 16 Sep 2014 21:29:09 -0700 Subject: [PATCH 05/10] HADOOP-11099. KMS return HTTP UNAUTHORIZED 401 on ACL failure. 
(tucu) --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../hadoop/crypto/key/kms/server/KMSExceptionsProvider.java | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index f0fcab5580f..a1dca6611a3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -824,6 +824,8 @@ Release 2.6.0 - UNRELEASED HADOOP-11062. CryptoCodec testcases requiring OpenSSL should be run only if -Pnative is used. (asuresh via tucu) + HADOOP-11099. KMS return HTTP UNAUTHORIZED 401 on ACL failure. (tucu) + Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java index 77b78ee783c..5cb088567c9 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java @@ -79,7 +79,7 @@ public class KMSExceptionsProvider implements ExceptionMapper { // we don't audit here because we did it already when checking access doAudit = false; } else if (throwable instanceof AuthorizationException) { - status = Response.Status.UNAUTHORIZED; + status = Response.Status.FORBIDDEN; // we don't audit here because we did it already when checking access doAudit = false; } else if (throwable instanceof AccessControlException) { From 0a495bef5cd675dce4c928cb5331588bb198accf Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 16 Sep 2014 21:21:17 -0700 Subject: [PATCH 06/10] HADOOP-11016. KMS should support signing cookies with zookeeper secret manager. (tucu) --- hadoop-common-project/hadoop-kms/pom.xml | 5 + .../hadoop-kms/src/main/conf/kms-site.xml | 57 ++++++ .../kms/server/KMSAuthenticationFilter.java | 7 +- .../hadoop-kms/src/site/apt/index.apt.vm | 155 +++++++++++---- .../hadoop/crypto/key/kms/server/TestKMS.java | 5 +- .../crypto/key/kms/server/TestKMSWithZK.java | 179 ++++++++++++++++++ 6 files changed, 367 insertions(+), 41 deletions(-) create mode 100644 hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 2c225cb18eb..e6b21aad6ce 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -187,6 +187,11 @@ metrics-core compile + + org.apache.curator + curator-test + test + diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml b/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml index 20896fc2873..f55ce5fb6df 100644 --- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml +++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml @@ -68,4 +68,61 @@ + + + + hadoop.kms.authentication.signer.secret.provider + random + + Indicates how the secret to sign the authentication cookies will be + stored. Options are 'random' (default), 'string' and 'zookeeper'. + If using a setup with multiple KMS instances, 'zookeeper' should be used. 
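
For reference, the signer-secret settings documented above can also be written
programmatically when generating a kms-site.xml for a test, which is how the
KMS tests in this patch series build their configuration before calling
conf.writeXml(...). The sketch below is illustrative only: the property names
come from the documentation above, while the class name, method name and the
ZooKeeper connection string "zk1.example.com:2181" are hypothetical
placeholders, not part of the patch.

import org.apache.hadoop.conf.Configuration;

public class KmsZkSignerConfSketch {
  // Minimal sketch: builds the cookie-signing secret provider properties
  // documented above so they can be written out with conf.writeXml(...).
  // "zk1.example.com:2181" is a placeholder for a real ZooKeeper ensemble.
  public static Configuration zkSignerConf() {
    Configuration conf = new Configuration(false);
    conf.set("hadoop.kms.authentication.signer.secret.provider", "zookeeper");
    conf.set("hadoop.kms.authentication.signer.secret.provider.zookeeper.path",
        "/hadoop-kms/hadoop-auth-signature-secret");
    conf.set("hadoop.kms.authentication.signer.secret.provider."
        + "zookeeper.connection.string", "zk1.example.com:2181");
    return conf;
  }
}
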
+ + + + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.path + /hadoop-kms/hadoop-auth-signature-secret + + The Zookeeper ZNode path where the KMS instances will store and retrieve + the secret from. + + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string + #HOSTNAME#:#PORT#,... + + The Zookeeper connection string, a list of hostnames and port comma + separated. + + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type + kerberos + + The Zookeeper authentication type, 'none' or 'sasl' (Kerberos). + + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab + /etc/hadoop/conf/kms.keytab + + The absolute path for the Kerberos keytab with the credentials to + connect to Zookeeper. + + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal + kms/#HOSTNAME# + + The Kerberos service principal used to connect to Zookeeper. + + + diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java index 4df6db54084..79652f35ad2 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java @@ -46,7 +46,8 @@ import java.util.Properties; @InterfaceAudience.Private public class KMSAuthenticationFilter extends DelegationTokenAuthenticationFilter { - private static final String CONF_PREFIX = KMSConfiguration.CONFIG_PREFIX + + + public static final String CONFIG_PREFIX = KMSConfiguration.CONFIG_PREFIX + "authentication."; @Override @@ -56,9 +57,9 @@ public class KMSAuthenticationFilter Configuration conf = KMSWebApp.getConfiguration(); for (Map.Entry entry : conf) { String name = entry.getKey(); - if (name.startsWith(CONF_PREFIX)) { + if (name.startsWith(CONFIG_PREFIX)) { String value = conf.get(name); - name = name.substring(CONF_PREFIX.length()); + name = name.substring(CONFIG_PREFIX.length()); props.setProperty(name, value); } } diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index d70f2a6d62e..5fded9282c7 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -448,16 +448,16 @@ $ keytool -genkey -alias tomcat -keyalg RSA KMS supports access control for all non-read operations at the Key level. 
All Key Access operations are classified as : - * MANAGEMENT - createKey, deleteKey, rolloverNewVersion + * MANAGEMENT - createKey, deleteKey, rolloverNewVersion - * GENERATE_EEK - generateEncryptedKey, warmUpEncryptedKeys + * GENERATE_EEK - generateEncryptedKey, warmUpEncryptedKeys - * DECRYPT_EEK - decryptEncryptedKey; + * DECRYPT_EEK - decryptEncryptedKey - * READ - getKeyVersion, getKeyVersions, getMetadata, getKeysMetadata, - getCurrentKey; + * READ - getKeyVersion, getKeyVersions, getMetadata, getKeysMetadata, + getCurrentKey - * ALL - all of the above; + * ALL - all of the above These can be defined in the KMS <<>> as follows @@ -554,41 +554,124 @@ $ keytool -genkey -alias tomcat -keyalg RSA KMS delegation token secret manager can be configured with the following properties: - +---+ - - hadoop.kms.authentication.delegation-token.update-interval.sec - 86400 - - How often the master key is rotated, in seconds. Default value 1 day. - - ++---+ + + hadoop.kms.authentication.delegation-token.update-interval.sec + 86400 + + How often the master key is rotated, in seconds. Default value 1 day. + + - - hadoop.kms.authentication.delegation-token.max-lifetime.sec - 604800 - - Maximum lifetime of a delagation token, in seconds. Default value 7 days. - - + + hadoop.kms.authentication.delegation-token.max-lifetime.sec + 604800 + + Maximum lifetime of a delagation token, in seconds. Default value 7 days. + + - - hadoop.kms.authentication.delegation-token.renew-interval.sec - 86400 - - Renewal interval of a delagation token, in seconds. Default value 1 day. - - + + hadoop.kms.authentication.delegation-token.renew-interval.sec + 86400 + + Renewal interval of a delagation token, in seconds. Default value 1 day. + + - - hadoop.kms.authentication.delegation-token.removal-scan-interval.sec - 3600 - - Scan interval to remove expired delegation tokens. - - - +---+ + + hadoop.kms.authentication.delegation-token.removal-scan-interval.sec + 3600 + + Scan interval to remove expired delegation tokens. + + ++---+ +** Using Multiple Instances of KMS Behind a Load-Balancer or VIP + + KMS supports multiple KMS instances behind a load-balancer or VIP for + scalability and for HA purposes. + + When using multiple KMS instances behind a load-balancer or VIP, requests from + the same user may be handled by different KMS instances. + + KMS instances behind a load-balancer or VIP must be specially configured to + work properly as a single logical service. + +*** HTTP Kerberos Principals Configuration + + TBD + +*** HTTP Authentication Signature + + KMS uses Hadoop Authentication for HTTP authentication. Hadoop Authentication + issues a signed HTTP Cookie once the client has authenticated successfully. + This HTTP Cookie has an expiration time, after which it will trigger a new + authentication sequence. This is done to avoid triggering the authentication + on every HTTP request of a client. + + A KMS instance must verify the HTTP Cookie signatures signed by other KMS + instances. To do this all KMS instances must share the signing secret. + + This secret sharing can be done using a Zookeeper service which is configured + in KMS with the following properties in the <<>>: + ++---+ + + hadoop.kms.authentication.signer.secret.provider + zookeeper + + Indicates how the secret to sign the authentication cookies will be + stored. Options are 'random' (default), 'string' and 'zookeeper'. + If using a setup with multiple KMS instances, 'zookeeper' should be used. 
+ + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.path + /hadoop-kms/hadoop-auth-signature-secret + + The Zookeeper ZNode path where the KMS instances will store and retrieve + the secret from. + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string + #HOSTNAME#:#PORT#,... + + The Zookeeper connection string, a list of hostnames and port comma + separated. + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type + kerberos + + The Zookeeper authentication type, 'none' or 'sasl' (Kerberos). + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab + /etc/hadoop/conf/kms.keytab + + The absolute path for the Kerberos keytab with the credentials to + connect to Zookeeper. + + + + hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal + kms/#HOSTNAME# + + The Kerberos service principal used to connect to Zookeeper. + + ++---+ + +*** Delegation Tokens + + TBD + ** KMS HTTP REST API *** Create a Key diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index f4f9fead63e..cdb3c7f5098 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -123,7 +123,8 @@ public class TestKMS { return conf; } - protected void writeConf(File confDir, Configuration conf) throws Exception { + public static void writeConf(File confDir, Configuration conf) + throws Exception { Writer writer = new FileWriter(new File(confDir, KMSConfiguration.KMS_SITE_XML)); conf.writeXml(writer); @@ -139,7 +140,7 @@ public class TestKMS { writer.close(); } - protected URI createKMSUri(URL kmsUrl) throws Exception { + public static URI createKMSUri(URL kmsUrl) throws Exception { String str = kmsUrl.toString(); str = str.replaceFirst("://", "@"); return new URI("kms://" + str); diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java new file mode 100644 index 00000000000..59b00023f40 --- /dev/null +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.crypto.key.kms.server; + +import org.apache.curator.test.TestingServer; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; +import org.apache.hadoop.crypto.key.KeyProvider.Options; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; +import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension; +import org.apache.hadoop.crypto.key.kms.KMSClientProvider; +import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import javax.security.auth.Subject; +import javax.security.auth.kerberos.KerberosPrincipal; +import javax.security.auth.login.AppConfigurationEntry; +import javax.security.auth.login.LoginContext; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.Writer; +import java.net.HttpURLConnection; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.SocketTimeoutException; +import java.net.URI; +import java.net.URL; +import java.security.Principal; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; + +public class TestKMSWithZK { + + protected Configuration createBaseKMSConf(File keyStoreDir) throws Exception { + Configuration conf = new Configuration(false); + conf.set("hadoop.security.key.provider.path", + "jceks://file@" + new Path(keyStoreDir.getAbsolutePath(), + "kms.keystore").toUri()); + conf.set("hadoop.kms.authentication.type", "simple"); + conf.setBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE, false); + + conf.set(KMSACLs.Type.GET_KEYS.getAclConfigKey(), "foo"); + return conf; + } + + @Test + public void testMultipleKMSInstancesWithZKSigner() throws Exception { + final File testDir = TestKMS.getTestDir(); + Configuration conf = createBaseKMSConf(testDir); + + TestingServer zkServer = new TestingServer(); + zkServer.start(); + + MiniKMS kms1 = null; + MiniKMS kms2 = null; + + conf.set(KMSAuthenticationFilter.CONFIG_PREFIX + + AuthenticationFilter.SIGNER_SECRET_PROVIDER, "zookeeper"); + conf.set(KMSAuthenticationFilter.CONFIG_PREFIX + + ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING, + zkServer.getConnectString()); + conf.set(KMSAuthenticationFilter.CONFIG_PREFIX + + ZKSignerSecretProvider.ZOOKEEPER_PATH, "/secret"); + TestKMS.writeConf(testDir, conf); + + try { + kms1 = new MiniKMS.Builder() + 
.setKmsConfDir(testDir).setLog4jConfFile("log4j.properties").build(); + kms1.start(); + + kms2 = new MiniKMS.Builder() + .setKmsConfDir(testDir).setLog4jConfFile("log4j.properties").build(); + kms2.start(); + + final URL url1 = new URL(kms1.getKMSUrl().toExternalForm() + + KMSRESTConstants.SERVICE_VERSION + "/" + + KMSRESTConstants.KEYS_NAMES_RESOURCE); + final URL url2 = new URL(kms2.getKMSUrl().toExternalForm() + + KMSRESTConstants.SERVICE_VERSION + "/" + + KMSRESTConstants.KEYS_NAMES_RESOURCE); + + final DelegationTokenAuthenticatedURL.Token token = + new DelegationTokenAuthenticatedURL.Token(); + final DelegationTokenAuthenticatedURL aUrl = + new DelegationTokenAuthenticatedURL(); + + UserGroupInformation ugiFoo = UserGroupInformation.createUserForTesting( + "foo", new String[]{"gfoo"}); + UserGroupInformation ugiBar = UserGroupInformation.createUserForTesting( + "bar", new String[]{"gBar"}); + + ugiFoo.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + HttpURLConnection conn = aUrl.openConnection(url1, token); + Assert.assertEquals(HttpURLConnection.HTTP_OK, + conn.getResponseCode()); + return null; + } + }); + + ugiBar.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + HttpURLConnection conn = aUrl.openConnection(url2, token); + Assert.assertEquals(HttpURLConnection.HTTP_OK, + conn.getResponseCode()); + return null; + } + }); + + ugiBar.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final DelegationTokenAuthenticatedURL.Token emptyToken = + new DelegationTokenAuthenticatedURL.Token(); + HttpURLConnection conn = aUrl.openConnection(url2, emptyToken); + Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, + conn.getResponseCode()); + return null; + } + }); + + } finally { + if (kms2 != null) { + kms2.stop(); + } + if (kms1 != null) { + kms1.stop(); + } + zkServer.stop(); + } + + } + +} From d9a86031a077184d429dd5463e7da156df112011 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 16 Sep 2014 23:07:01 -0700 Subject: [PATCH 07/10] HADOOP-10982 --- .../crypto/key/kms/KMSClientProvider.java | 3 ++ .../hadoop-kms/src/site/apt/index.apt.vm | 26 ++++++++- .../hadoop/crypto/key/kms/server/TestKMS.java | 54 +++++++++++++++---- 3 files changed, 72 insertions(+), 11 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index 899b6c44dc7..a97463ac881 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -45,6 +45,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; +import java.lang.reflect.UndeclaredThrowableException; import java.net.HttpURLConnection; import java.net.SocketTimeoutException; import java.net.URI; @@ -400,6 +401,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension, }); } catch (IOException ex) { throw ex; + } catch (UndeclaredThrowableException ex) { + throw new IOException(ex.getUndeclaredThrowable()); } catch (Exception ex) { throw new IOException(ex); } diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm 
b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index 5fded9282c7..682f4795d89 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -602,7 +602,31 @@ $ keytool -genkey -alias tomcat -keyalg RSA *** HTTP Kerberos Principals Configuration - TBD + When KMS instances are behind a load-balancer or VIP, clients will use the + hostname of the VIP. For Kerberos SPNEGO authentication, the hostname of the + URL is used to construct the Kerberos service name of the server, + <<>>. This means that all KMS instances must have have a + Kerberos service name with the load-balancer or VIP hostname. + + In order to be able to access directly a specific KMS instance, the KMS + instance must also have Kebero service name with its own hostname. This is + require for monitoring and admin purposes. + + Both Kerberos service principal credentials (for the load-balancer/VIP + hostname and for the actual KMS instance hostname) must be in the keytab file + configured for authentication. And the principal name specified in the + configuration must be '*'. For example: + ++---+ + + hadoop.kms.authentication.kerberos.principal + * + ++---+ + + <> If using HTTPS, the SSL certificate used by the KMS instance must + be configured to support multiple hostnames (see Java 7 + <<> SAN extension support for details on how to do this). *** HTTP Authentication Signature diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index cdb3c7f5098..42afe19a73f 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -32,6 +32,7 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.junit.AfterClass; @@ -209,6 +210,7 @@ public class TestKMS { keytab = new File(kdcDir, "keytab"); List principals = new ArrayList(); principals.add("HTTP/localhost"); + principals.add("HTTP/127.0.0.1"); principals.add("client"); principals.add("hdfs"); principals.add("otheradmin"); @@ -251,8 +253,8 @@ public class TestKMS { } } - public void testStartStop(final boolean ssl, final boolean kerberos) - throws Exception { + public void testStartStop(final boolean ssl, final boolean kerberos, + final boolean multipleServerPrincipals) throws Exception { Configuration conf = new Configuration(); if (kerberos) { conf.set("hadoop.security.authentication", "kerberos"); @@ -278,7 +280,12 @@ public class TestKMS { conf.set("hadoop.kms.authentication.type", "kerberos"); conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath()); - conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost"); + if (multipleServerPrincipals) { + conf.set("hadoop.kms.authentication.kerberos.principal", "*"); + } else { + conf.set("hadoop.kms.authentication.kerberos.principal", + "HTTP/localhost"); + } conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); } @@ -291,21 
+298,42 @@ public class TestKMS { URL url = getKMSUrl(); Assert.assertEquals(keystore != null, url.getProtocol().equals("https")); - final URI uri = createKMSUri(getKMSUrl()); if (kerberos) { for (String user : new String[]{"client", "client/host"}) { doAs(user, new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - final KeyProvider kp = new KMSClientProvider(uri, conf); + URI uri = createKMSUri(getKMSUrl()); + KeyProvider kp = new KMSClientProvider(uri, conf); // getKeys() empty Assert.assertTrue(kp.getKeys().isEmpty()); + + if (!ssl) { + String url = getKMSUrl().toString(); + url = url.replace("localhost", "127.0.0.1"); + uri = createKMSUri(new URL(url)); + if (multipleServerPrincipals) { + kp = new KMSClientProvider(uri, conf); + // getKeys() empty + Assert.assertTrue(kp.getKeys().isEmpty()); + } else { + kp = new KMSClientProvider(uri, conf); + try { + kp.getKeys().isEmpty(); + Assert.fail(); + } catch (IOException ex) { + Assert.assertEquals(AuthenticationException.class, + ex.getCause().getClass()); + } + } + } return null; } }); } } else { + URI uri = createKMSUri(getKMSUrl()); KeyProvider kp = new KMSClientProvider(uri, conf); // getKeys() empty Assert.assertTrue(kp.getKeys().isEmpty()); @@ -317,22 +345,27 @@ public class TestKMS { @Test public void testStartStopHttpPseudo() throws Exception { - testStartStop(false, false); + testStartStop(false, false, false); } @Test public void testStartStopHttpsPseudo() throws Exception { - testStartStop(true, false); + testStartStop(true, false, false); } @Test public void testStartStopHttpKerberos() throws Exception { - testStartStop(false, true); + testStartStop(false, true, false); } @Test public void testStartStopHttpsKerberos() throws Exception { - testStartStop(true, true); + testStartStop(true, true, false); + } + + @Test + public void testStartStopHttpsKerberosMultiplePrincipals() throws Exception { + testStartStop(false, true, true); } @Test @@ -1340,7 +1373,8 @@ public class TestKMS { KeyProvider kp = new KMSClientProvider(uri, conf); kp.createKey("kA", new KeyProvider.Options(conf)); } catch (IOException ex) { - System.out.println(ex.getMessage()); + Assert.assertEquals(AuthenticationException.class, + ex.getCause().getClass()); } doAs("client", new PrivilegedExceptionAction() { From 3f8f860cc65e179dd5766fea4d21cf30fa4b96e3 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Wed, 17 Sep 2014 11:11:15 -0700 Subject: [PATCH 08/10] Revert "HADOOP-10982" This reverts commit d9a86031a077184d429dd5463e7da156df112011. 
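
This revert removes, among other changes, the UndeclaredThrowableException
handling that HADOOP-10982 had added to KMSClientProvider. For context, the
idiom the reverted change used is sketched below; this is an illustrative
fragment under stated assumptions, not the provider's actual call path, and
the class and helper names are hypothetical.

import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class UndeclaredThrowableUnwrapSketch {
  // Minimal sketch of the unwrapping idiom from HADOOP-10982 (reverted here).
  // UserGroupInformation.doAs only declares IOException and
  // InterruptedException; other checked exceptions thrown inside the action
  // (for example AuthenticationException) surface wrapped in an
  // UndeclaredThrowableException, so the real cause is unwrapped before being
  // rethrown as an IOException.
  static <T> T runAsCurrentUser(PrivilegedExceptionAction<T> action)
      throws IOException {
    try {
      return UserGroupInformation.getCurrentUser().doAs(action);
    } catch (IOException ex) {
      throw ex;
    } catch (UndeclaredThrowableException ex) {
      throw new IOException(ex.getUndeclaredThrowable());
    } catch (Exception ex) {
      throw new IOException(ex);
    }
  }
}
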
--- .../crypto/key/kms/KMSClientProvider.java | 3 -- .../hadoop-kms/src/site/apt/index.apt.vm | 26 +-------- .../hadoop/crypto/key/kms/server/TestKMS.java | 54 ++++--------------- 3 files changed, 11 insertions(+), 72 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index a97463ac881..899b6c44dc7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -45,7 +45,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; -import java.lang.reflect.UndeclaredThrowableException; import java.net.HttpURLConnection; import java.net.SocketTimeoutException; import java.net.URI; @@ -401,8 +400,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension, }); } catch (IOException ex) { throw ex; - } catch (UndeclaredThrowableException ex) { - throw new IOException(ex.getUndeclaredThrowable()); } catch (Exception ex) { throw new IOException(ex); } diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index 682f4795d89..5fded9282c7 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -602,31 +602,7 @@ $ keytool -genkey -alias tomcat -keyalg RSA *** HTTP Kerberos Principals Configuration - When KMS instances are behind a load-balancer or VIP, clients will use the - hostname of the VIP. For Kerberos SPNEGO authentication, the hostname of the - URL is used to construct the Kerberos service name of the server, - <<>>. This means that all KMS instances must have have a - Kerberos service name with the load-balancer or VIP hostname. - - In order to be able to access directly a specific KMS instance, the KMS - instance must also have Kebero service name with its own hostname. This is - require for monitoring and admin purposes. - - Both Kerberos service principal credentials (for the load-balancer/VIP - hostname and for the actual KMS instance hostname) must be in the keytab file - configured for authentication. And the principal name specified in the - configuration must be '*'. For example: - -+---+ - - hadoop.kms.authentication.kerberos.principal - * - -+---+ - - <> If using HTTPS, the SSL certificate used by the KMS instance must - be configured to support multiple hostnames (see Java 7 - <<> SAN extension support for details on how to do this). 
+ TBD *** HTTP Authentication Signature diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index 42afe19a73f..cdb3c7f5098 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -32,7 +32,6 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.junit.AfterClass; @@ -210,7 +209,6 @@ public class TestKMS { keytab = new File(kdcDir, "keytab"); List principals = new ArrayList(); principals.add("HTTP/localhost"); - principals.add("HTTP/127.0.0.1"); principals.add("client"); principals.add("hdfs"); principals.add("otheradmin"); @@ -253,8 +251,8 @@ public class TestKMS { } } - public void testStartStop(final boolean ssl, final boolean kerberos, - final boolean multipleServerPrincipals) throws Exception { + public void testStartStop(final boolean ssl, final boolean kerberos) + throws Exception { Configuration conf = new Configuration(); if (kerberos) { conf.set("hadoop.security.authentication", "kerberos"); @@ -280,12 +278,7 @@ public class TestKMS { conf.set("hadoop.kms.authentication.type", "kerberos"); conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath()); - if (multipleServerPrincipals) { - conf.set("hadoop.kms.authentication.kerberos.principal", "*"); - } else { - conf.set("hadoop.kms.authentication.kerberos.principal", - "HTTP/localhost"); - } + conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost"); conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); } @@ -298,42 +291,21 @@ public class TestKMS { URL url = getKMSUrl(); Assert.assertEquals(keystore != null, url.getProtocol().equals("https")); + final URI uri = createKMSUri(getKMSUrl()); if (kerberos) { for (String user : new String[]{"client", "client/host"}) { doAs(user, new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - URI uri = createKMSUri(getKMSUrl()); - KeyProvider kp = new KMSClientProvider(uri, conf); + final KeyProvider kp = new KMSClientProvider(uri, conf); // getKeys() empty Assert.assertTrue(kp.getKeys().isEmpty()); - - if (!ssl) { - String url = getKMSUrl().toString(); - url = url.replace("localhost", "127.0.0.1"); - uri = createKMSUri(new URL(url)); - if (multipleServerPrincipals) { - kp = new KMSClientProvider(uri, conf); - // getKeys() empty - Assert.assertTrue(kp.getKeys().isEmpty()); - } else { - kp = new KMSClientProvider(uri, conf); - try { - kp.getKeys().isEmpty(); - Assert.fail(); - } catch (IOException ex) { - Assert.assertEquals(AuthenticationException.class, - ex.getCause().getClass()); - } - } - } return null; } }); } } else { - URI uri = createKMSUri(getKMSUrl()); KeyProvider kp = new KMSClientProvider(uri, conf); // getKeys() empty Assert.assertTrue(kp.getKeys().isEmpty()); @@ -345,27 +317,22 @@ public class TestKMS { @Test public void testStartStopHttpPseudo() throws Exception { - testStartStop(false, false, false); + testStartStop(false, 
false); } @Test public void testStartStopHttpsPseudo() throws Exception { - testStartStop(true, false, false); + testStartStop(true, false); } @Test public void testStartStopHttpKerberos() throws Exception { - testStartStop(false, true, false); + testStartStop(false, true); } @Test public void testStartStopHttpsKerberos() throws Exception { - testStartStop(true, true, false); - } - - @Test - public void testStartStopHttpsKerberosMultiplePrincipals() throws Exception { - testStartStop(false, true, true); + testStartStop(true, true); } @Test @@ -1373,8 +1340,7 @@ public class TestKMS { KeyProvider kp = new KMSClientProvider(uri, conf); kp.createKey("kA", new KeyProvider.Options(conf)); } catch (IOException ex) { - Assert.assertEquals(AuthenticationException.class, - ex.getCause().getClass()); + System.out.println(ex.getMessage()); } doAs("client", new PrivilegedExceptionAction() { From 8a7671d7539bff0566cb87f2b347f71bcf148977 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Wed, 17 Sep 2014 11:11:33 -0700 Subject: [PATCH 09/10] Revert "HADOOP-11016. KMS should support signing cookies with zookeeper secret manager. (tucu)" This reverts commit 0a495bef5cd675dce4c928cb5331588bb198accf. --- hadoop-common-project/hadoop-kms/pom.xml | 5 - .../hadoop-kms/src/main/conf/kms-site.xml | 57 ------ .../kms/server/KMSAuthenticationFilter.java | 7 +- .../hadoop-kms/src/site/apt/index.apt.vm | 155 ++++----------- .../hadoop/crypto/key/kms/server/TestKMS.java | 5 +- .../crypto/key/kms/server/TestKMSWithZK.java | 179 ------------------ 6 files changed, 41 insertions(+), 367 deletions(-) delete mode 100644 hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index e6b21aad6ce..2c225cb18eb 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -187,11 +187,6 @@ metrics-core compile - - org.apache.curator - curator-test - test - diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml b/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml index f55ce5fb6df..20896fc2873 100644 --- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml +++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-site.xml @@ -68,61 +68,4 @@ - - - - hadoop.kms.authentication.signer.secret.provider - random - - Indicates how the secret to sign the authentication cookies will be - stored. Options are 'random' (default), 'string' and 'zookeeper'. - If using a setup with multiple KMS instances, 'zookeeper' should be used. - - - - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.path - /hadoop-kms/hadoop-auth-signature-secret - - The Zookeeper ZNode path where the KMS instances will store and retrieve - the secret from. - - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string - #HOSTNAME#:#PORT#,... - - The Zookeeper connection string, a list of hostnames and port comma - separated. - - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type - kerberos - - The Zookeeper authentication type, 'none' or 'sasl' (Kerberos). - - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab - /etc/hadoop/conf/kms.keytab - - The absolute path for the Kerberos keytab with the credentials to - connect to Zookeeper. 
- - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal - kms/#HOSTNAME# - - The Kerberos service principal used to connect to Zookeeper. - - - diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java index 79652f35ad2..4df6db54084 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java @@ -46,8 +46,7 @@ import java.util.Properties; @InterfaceAudience.Private public class KMSAuthenticationFilter extends DelegationTokenAuthenticationFilter { - - public static final String CONFIG_PREFIX = KMSConfiguration.CONFIG_PREFIX + + private static final String CONF_PREFIX = KMSConfiguration.CONFIG_PREFIX + "authentication."; @Override @@ -57,9 +56,9 @@ public class KMSAuthenticationFilter Configuration conf = KMSWebApp.getConfiguration(); for (Map.Entry entry : conf) { String name = entry.getKey(); - if (name.startsWith(CONFIG_PREFIX)) { + if (name.startsWith(CONF_PREFIX)) { String value = conf.get(name); - name = name.substring(CONFIG_PREFIX.length()); + name = name.substring(CONF_PREFIX.length()); props.setProperty(name, value); } } diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index 5fded9282c7..d70f2a6d62e 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -448,16 +448,16 @@ $ keytool -genkey -alias tomcat -keyalg RSA KMS supports access control for all non-read operations at the Key level. All Key Access operations are classified as : - * MANAGEMENT - createKey, deleteKey, rolloverNewVersion + * MANAGEMENT - createKey, deleteKey, rolloverNewVersion - * GENERATE_EEK - generateEncryptedKey, warmUpEncryptedKeys + * GENERATE_EEK - generateEncryptedKey, warmUpEncryptedKeys - * DECRYPT_EEK - decryptEncryptedKey + * DECRYPT_EEK - decryptEncryptedKey; - * READ - getKeyVersion, getKeyVersions, getMetadata, getKeysMetadata, - getCurrentKey + * READ - getKeyVersion, getKeyVersions, getMetadata, getKeysMetadata, + getCurrentKey; - * ALL - all of the above + * ALL - all of the above; These can be defined in the KMS <<>> as follows @@ -554,124 +554,41 @@ $ keytool -genkey -alias tomcat -keyalg RSA KMS delegation token secret manager can be configured with the following properties: -+---+ - - hadoop.kms.authentication.delegation-token.update-interval.sec - 86400 - - How often the master key is rotated, in seconds. Default value 1 day. - - + +---+ + + hadoop.kms.authentication.delegation-token.update-interval.sec + 86400 + + How often the master key is rotated, in seconds. Default value 1 day. + + - - hadoop.kms.authentication.delegation-token.max-lifetime.sec - 604800 - - Maximum lifetime of a delagation token, in seconds. Default value 7 days. - - + + hadoop.kms.authentication.delegation-token.max-lifetime.sec + 604800 + + Maximum lifetime of a delagation token, in seconds. Default value 7 days. + + - - hadoop.kms.authentication.delegation-token.renew-interval.sec - 86400 - - Renewal interval of a delagation token, in seconds. Default value 1 day. 
- - + + hadoop.kms.authentication.delegation-token.renew-interval.sec + 86400 + + Renewal interval of a delagation token, in seconds. Default value 1 day. + + - - hadoop.kms.authentication.delegation-token.removal-scan-interval.sec - 3600 - - Scan interval to remove expired delegation tokens. - - -+---+ + + hadoop.kms.authentication.delegation-token.removal-scan-interval.sec + 3600 + + Scan interval to remove expired delegation tokens. + + + +---+ -** Using Multiple Instances of KMS Behind a Load-Balancer or VIP - - KMS supports multiple KMS instances behind a load-balancer or VIP for - scalability and for HA purposes. - - When using multiple KMS instances behind a load-balancer or VIP, requests from - the same user may be handled by different KMS instances. - - KMS instances behind a load-balancer or VIP must be specially configured to - work properly as a single logical service. - -*** HTTP Kerberos Principals Configuration - - TBD - -*** HTTP Authentication Signature - - KMS uses Hadoop Authentication for HTTP authentication. Hadoop Authentication - issues a signed HTTP Cookie once the client has authenticated successfully. - This HTTP Cookie has an expiration time, after which it will trigger a new - authentication sequence. This is done to avoid triggering the authentication - on every HTTP request of a client. - - A KMS instance must verify the HTTP Cookie signatures signed by other KMS - instances. To do this all KMS instances must share the signing secret. - - This secret sharing can be done using a Zookeeper service which is configured - in KMS with the following properties in the <<>>: - -+---+ - - hadoop.kms.authentication.signer.secret.provider - zookeeper - - Indicates how the secret to sign the authentication cookies will be - stored. Options are 'random' (default), 'string' and 'zookeeper'. - If using a setup with multiple KMS instances, 'zookeeper' should be used. - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.path - /hadoop-kms/hadoop-auth-signature-secret - - The Zookeeper ZNode path where the KMS instances will store and retrieve - the secret from. - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string - #HOSTNAME#:#PORT#,... - - The Zookeeper connection string, a list of hostnames and port comma - separated. - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type - kerberos - - The Zookeeper authentication type, 'none' or 'sasl' (Kerberos). - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab - /etc/hadoop/conf/kms.keytab - - The absolute path for the Kerberos keytab with the credentials to - connect to Zookeeper. - - - - hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal - kms/#HOSTNAME# - - The Kerberos service principal used to connect to Zookeeper. 
- - -+---+ - -*** Delegation Tokens - - TBD - ** KMS HTTP REST API *** Create a Key diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index cdb3c7f5098..f4f9fead63e 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -123,8 +123,7 @@ public class TestKMS { return conf; } - public static void writeConf(File confDir, Configuration conf) - throws Exception { + protected void writeConf(File confDir, Configuration conf) throws Exception { Writer writer = new FileWriter(new File(confDir, KMSConfiguration.KMS_SITE_XML)); conf.writeXml(writer); @@ -140,7 +139,7 @@ public class TestKMS { writer.close(); } - public static URI createKMSUri(URL kmsUrl) throws Exception { + protected URI createKMSUri(URL kmsUrl) throws Exception { String str = kmsUrl.toString(); str = str.replaceFirst("://", "@"); return new URI("kms://" + str); diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java deleted file mode 100644 index 59b00023f40..00000000000 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.crypto.key.kms.server; - -import org.apache.curator.test.TestingServer; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; -import org.apache.hadoop.crypto.key.KeyProvider.Options; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; -import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension; -import org.apache.hadoop.crypto.key.kms.KMSClientProvider; -import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.minikdc.MiniKdc; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -import javax.security.auth.Subject; -import javax.security.auth.kerberos.KerberosPrincipal; -import javax.security.auth.login.AppConfigurationEntry; -import javax.security.auth.login.LoginContext; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.io.Writer; -import java.net.HttpURLConnection; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.net.SocketTimeoutException; -import java.net.URI; -import java.net.URL; -import java.security.Principal; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; - -public class TestKMSWithZK { - - protected Configuration createBaseKMSConf(File keyStoreDir) throws Exception { - Configuration conf = new Configuration(false); - conf.set("hadoop.security.key.provider.path", - "jceks://file@" + new Path(keyStoreDir.getAbsolutePath(), - "kms.keystore").toUri()); - conf.set("hadoop.kms.authentication.type", "simple"); - conf.setBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE, false); - - conf.set(KMSACLs.Type.GET_KEYS.getAclConfigKey(), "foo"); - return conf; - } - - @Test - public void testMultipleKMSInstancesWithZKSigner() throws Exception { - final File testDir = TestKMS.getTestDir(); - Configuration conf = createBaseKMSConf(testDir); - - TestingServer zkServer = new TestingServer(); - zkServer.start(); - - MiniKMS kms1 = null; - MiniKMS kms2 = null; - - conf.set(KMSAuthenticationFilter.CONFIG_PREFIX + - AuthenticationFilter.SIGNER_SECRET_PROVIDER, "zookeeper"); - conf.set(KMSAuthenticationFilter.CONFIG_PREFIX + - ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING, - zkServer.getConnectString()); - conf.set(KMSAuthenticationFilter.CONFIG_PREFIX + - ZKSignerSecretProvider.ZOOKEEPER_PATH, "/secret"); - TestKMS.writeConf(testDir, conf); - - try { - kms1 = new MiniKMS.Builder() - 
.setKmsConfDir(testDir).setLog4jConfFile("log4j.properties").build(); - kms1.start(); - - kms2 = new MiniKMS.Builder() - .setKmsConfDir(testDir).setLog4jConfFile("log4j.properties").build(); - kms2.start(); - - final URL url1 = new URL(kms1.getKMSUrl().toExternalForm() + - KMSRESTConstants.SERVICE_VERSION + "/" + - KMSRESTConstants.KEYS_NAMES_RESOURCE); - final URL url2 = new URL(kms2.getKMSUrl().toExternalForm() + - KMSRESTConstants.SERVICE_VERSION + "/" + - KMSRESTConstants.KEYS_NAMES_RESOURCE); - - final DelegationTokenAuthenticatedURL.Token token = - new DelegationTokenAuthenticatedURL.Token(); - final DelegationTokenAuthenticatedURL aUrl = - new DelegationTokenAuthenticatedURL(); - - UserGroupInformation ugiFoo = UserGroupInformation.createUserForTesting( - "foo", new String[]{"gfoo"}); - UserGroupInformation ugiBar = UserGroupInformation.createUserForTesting( - "bar", new String[]{"gBar"}); - - ugiFoo.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - HttpURLConnection conn = aUrl.openConnection(url1, token); - Assert.assertEquals(HttpURLConnection.HTTP_OK, - conn.getResponseCode()); - return null; - } - }); - - ugiBar.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - HttpURLConnection conn = aUrl.openConnection(url2, token); - Assert.assertEquals(HttpURLConnection.HTTP_OK, - conn.getResponseCode()); - return null; - } - }); - - ugiBar.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - final DelegationTokenAuthenticatedURL.Token emptyToken = - new DelegationTokenAuthenticatedURL.Token(); - HttpURLConnection conn = aUrl.openConnection(url2, emptyToken); - Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, - conn.getResponseCode()); - return null; - } - }); - - } finally { - if (kms2 != null) { - kms2.stop(); - } - if (kms1 != null) { - kms1.stop(); - } - zkServer.stop(); - } - - } - -} From ea4e2e843ecadd8019ea35413f4a34b97a424923 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 17 Sep 2014 11:23:47 -0700 Subject: [PATCH 10/10] HDFS-6705. Create an XAttr that disallows the HDFS admin from accessing a file. (clamb via wang) --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../server/common/HdfsServerConstants.java | 3 +- .../hdfs/server/namenode/FSDirectory.java | 42 ++++- .../hdfs/server/namenode/FSNamesystem.java | 24 ++- .../namenode/XAttrPermissionFilter.java | 14 ++ .../src/site/apt/ExtendedAttributes.apt.vm | 3 +- .../hdfs/server/namenode/FSXAttrBaseTest.java | 148 ++++++++++++++++-- .../src/test/resources/testXAttrConf.xml | 73 +++++++++ 8 files changed, 287 insertions(+), 23 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 752e77869ca..567a6abb771 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -468,6 +468,9 @@ Release 2.6.0 - UNRELEASED HDFS-6851. Refactor EncryptionZoneWithId and EncryptionZone. (clamb via wang) + HDFS-6705. Create an XAttr that disallows the HDFS admin from accessing a + file. (clamb via wang) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. 
(wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 98c6398c2cb..106f4893d60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -299,5 +299,6 @@ public final class HdfsServerConstants { "raw.hdfs.crypto.encryption.zone"; public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO = "raw.hdfs.crypto.file.encryption.info"; + public static final String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER = + "security.hdfs.unreadable.by.superuser"; } - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 836ebd23b8f..e33832d35be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; import static org.apache.hadoop.util.Time.now; import java.io.Closeable; @@ -90,6 +91,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import org.apache.hadoop.security.AccessControlException; /** * Both FSDirectory and FSNamesystem manage the state of the namespace. 
@@ -128,6 +130,8 @@ public class FSDirectory implements Closeable { DFSUtil.string2Bytes(DOT_INODES_STRING); private final XAttr KEYID_XATTR = XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null); + private final XAttr UNREADABLE_BY_SUPERUSER_XATTR = + XAttrHelper.buildXAttr(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER, null); INodeDirectory rootDir; private final FSNamesystem namesystem; @@ -2611,7 +2615,8 @@ public class FSDirectory implements Closeable { */ @VisibleForTesting List filterINodeXAttrs(final List existingXAttrs, - final List toFilter, final List filtered) { + final List toFilter, final List filtered) + throws AccessControlException { if (existingXAttrs == null || existingXAttrs.isEmpty() || toFilter == null || toFilter.isEmpty()) { return existingXAttrs; @@ -2627,6 +2632,10 @@ public class FSDirectory implements Closeable { XAttr filter = it.next(); Preconditions.checkArgument(!KEYID_XATTR.equalsIgnoreValue(filter), "The encryption zone xattr should never be deleted."); + if (UNREADABLE_BY_SUPERUSER_XATTR.equalsIgnoreValue(filter)) { + throw new AccessControlException("The xattr '" + + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted."); + } if (a.equalsIgnoreValue(filter)) { add = false; it.remove(); @@ -2765,16 +2774,23 @@ public class FSDirectory implements Closeable { int snapshotId = iip.getLatestSnapshotId(); List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); List newXAttrs = setINodeXAttrs(existingXAttrs, xAttrs, flag); + final boolean isFile = inode.isFile(); - /* - * If we're adding the encryption zone xattr, then add src to the list - * of encryption zones. - */ for (XAttr xattr : newXAttrs) { final String xaName = XAttrHelper.getPrefixName(xattr); + + /* + * If we're adding the encryption zone xattr, then add src to the list + * of encryption zones. 
+ */ if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { ezManager.addEncryptionZone(inode.getId(), new String(xattr.getValue())); } + + if (!isFile && SECURITY_XATTR_UNREADABLE_BY_SUPERUSER.equals(xaName)) { + throw new IOException("Can only set '" + + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file."); + } } XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId); @@ -2865,12 +2881,26 @@ public class FSDirectory implements Closeable { INodesInPath iip = getLastINodeInPath(srcs, true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getPathSnapshotId(); - return XAttrStorage.readINodeXAttrs(inode, snapshotId); + return unprotectedGetXAttrs(inode, snapshotId); } finally { readUnlock(); } } + List getXAttrs(INode inode, int snapshotId) throws IOException { + readLock(); + try { + return unprotectedGetXAttrs(inode, snapshotId); + } finally { + readUnlock(); + } + } + + private List unprotectedGetXAttrs(INode inode, int snapshotId) + throws IOException { + return XAttrStorage.readINodeXAttrs(inode, snapshotId); + } + private static INode resolveLastINode(String src, INodesInPath iip) throws FileNotFoundException { INode inode = iip.getLastINode(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index de2fdea0554..a85f04800ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -88,6 +88,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; import static org.apache.hadoop.util.Time.now; import java.io.BufferedWriter; @@ -166,6 +167,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.UnknownCipherSuiteException; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; @@ -1839,6 +1841,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats, final INodesInPath iip = dir.getLastINodeInPath(src); final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src); + if (isPermissionEnabled) { + checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId()); + } if (!iip.isSnapshot() //snapshots are readonly, so don't update atime. && doAccessTime && isAccessTimeSupported()) { final long now = now(); @@ -6147,6 +6152,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats, checkPermission(pc, path, false, null, null, access, null); } + private void checkUnreadableBySuperuser(FSPermissionChecker pc, + INode inode, int snapshotId) + throws IOException { + for (XAttr xattr : dir.getXAttrs(inode, snapshotId)) { + if (XAttrHelper.getPrefixName(xattr). 
+ equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) { + if (pc.isSuperUser()) { + throw new AccessControlException("Access is denied for " + + pc.getUser() + " since the superuser is not allowed to " + + "perform this operation."); + } + } + } + } + private void checkParentAccess(FSPermissionChecker pc, String path, FsAction access) throws AccessControlException, UnresolvedLinkException { @@ -8910,7 +8930,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats, AccessControlException { if (isPermissionEnabled && xAttr.getNameSpace() == XAttr.NameSpace.USER) { final INode inode = dir.getINode(src); - if (inode.isDirectory() && inode.getFsPermission().getStickyBit()) { + if (inode != null && + inode.isDirectory() && + inode.getFsPermission().getStickyBit()) { if (!pc.isSuperUser()) { checkOwner(pc, src); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index 237f9d3d5ee..79dabb33899 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -28,6 +28,8 @@ import org.apache.hadoop.security.AccessControlException; import com.google.common.collect.Lists; import com.google.common.base.Preconditions; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; + /** * There are four types of extended attributes defined by the * following namespaces: @@ -70,6 +72,15 @@ public class XAttrPermissionFilter { isRawPath && isSuperUser) { return; } + if (XAttrHelper.getPrefixName(xAttr). + equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) { + if (xAttr.getValue() != null) { + throw new AccessControlException("Attempt to set a value for '" + + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + + "'. Values are not allowed for this xattr."); + } + return; + } throw new AccessControlException("User doesn't have permission for xattr: " + XAttrHelper.getPrefixName(xAttr)); } @@ -104,6 +115,9 @@ public class XAttrPermissionFilter { } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isSuperUser && isRawPath) { filteredXAttrs.add(xAttr); + } else if (XAttrHelper.getPrefixName(xAttr). + equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) { + filteredXAttrs.add(xAttr); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm index 0a99fe50ee6..109e988a863 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm @@ -38,7 +38,7 @@ Extended Attributes in HDFS The <<>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods, and is reserved for implementing internal HDFS features. - The <<>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused. + The <<>> namespace is reserved for internal HDFS use. This namespace is generally not accessible through userspace methods. One particular use of <<>> is the <<>> extended attribute. This xattr can only be set on files, and it will prevent the superuser from reading the file's contents. 
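
  As a purely illustrative sketch (the file path and FileSystem setup are
  assumptions; the call itself mirrors the usage exercised by FSXAttrBaseTest
  later in this patch), a client could mark one of its files unreadable by the
  superuser as follows:

+---+
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class UnreadableBySuperuserSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/user/alice/secret.txt"); // hypothetical file

    // The xattr takes no value; it can only be set on files, and once set it
    // cannot be removed, so the superuser is denied read access to the
    // file's contents from then on.
    fs.setXAttr(file, "security.hdfs.unreadable.by.superuser", null,
        EnumSet.of(XAttrSetFlag.CREATE));

    // The xattr is visible to any user when listing the file's xattrs.
    System.out.println(fs.listXAttrs(file));
  }
}
+---+

  The shell equivalent, "hadoop fs -setfattr -n
  security.hdfs.unreadable.by.superuser <path>", matches the CLI test cases
  added to testXAttrConf.xml in this patch.
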
The superuser can still read and modify file metadata, such as the owner, permissions, etc. This xattr can be set and accessed by any user, assuming normal filesystem permissions. This xattr is also write-once, and cannot be removed once set. This xattr does not allow a value to be set. The <<>> namespace is reserved for internal system attributes that sometimes need to be exposed. Like <<>> namespace attributes they are not visible to the user except when <<>>/<<>> is called on a file or directory in the <<>> HDFS directory hierarchy. These attributes can only be accessed by the superuser. An example of where <<>> namespace extended attributes are used is the <<>> utility. Encryption zone meta data is stored in <<>> extended attributes, so as long as the administrator uses <<>> pathnames in source and target, the encrypted files in the encryption zones are transparently copied. @@ -82,7 +82,6 @@ Extended Attributes in HDFS * {Configuration options} - HDFS supports extended attributes out of the box, without additional configuration. Administrators could potentially be interested in the options limiting the number of xattrs per inode and the size of xattrs, since xattrs increase the on-disk and in-memory space consumption of an inode. * <<>> diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java index 9c484006247..e21e34ca173 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -45,7 +45,9 @@ import static org.apache.hadoop.fs.permission.AclEntryType.USER; import static org.apache.hadoop.fs.permission.FsAction.ALL; import static org.apache.hadoop.fs.permission.FsAction.READ; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import org.junit.After; @@ -64,14 +66,14 @@ import com.google.common.collect.Maps; */ public class FSXAttrBaseTest { - private static final int MAX_SIZE = 16; - protected static MiniDFSCluster dfsCluster; protected static Configuration conf; private static int pathCount = 0; protected static Path path; + protected static Path filePath; protected static Path rawPath; - + protected static Path rawFilePath; + // XAttrs protected static final String name1 = "user.a1"; protected static final byte[] value1 = {0x31, 0x32, 0x33}; @@ -82,6 +84,10 @@ public class FSXAttrBaseTest { protected static final String name4 = "user.a4"; protected static final String raw1 = "raw.a1"; protected static final String raw2 = "raw.a2"; + protected static final String security1 = + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; + + private static final int MAX_SIZE = security1.length(); protected FileSystem fs; @@ -111,7 +117,9 @@ public class FSXAttrBaseTest { public void setUp() throws Exception { pathCount += 1; path = new Path("/p" + pathCount); + filePath = new Path(path, "file"); rawPath = new Path("/.reserved/raw/p" + pathCount); + rawFilePath = new Path(rawPath, "file"); initFileSystem(); } @@ -133,16 +141,17 @@ public class FSXAttrBaseTest { 
Map expectedXAttrs = Maps.newHashMap(); expectedXAttrs.put(name1, value1); expectedXAttrs.put(name2, null); - doTestCreateXAttr(path, expectedXAttrs); + expectedXAttrs.put(security1, null); + doTestCreateXAttr(filePath, expectedXAttrs); expectedXAttrs.put(raw1, value1); - doTestCreateXAttr(rawPath, expectedXAttrs); + doTestCreateXAttr(rawFilePath, expectedXAttrs); } private void doTestCreateXAttr(Path usePath, Map expectedXAttrs) throws Exception { - FileSystem.mkdirs(fs, usePath, FsPermission.createImmutable((short)0750)); + DFSTestUtil.createFile(fs, usePath, 8192, (short) 1, 0xFEED); fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); - + Map xattrs = fs.getXAttrs(usePath); Assert.assertEquals(xattrs.size(), 1); Assert.assertArrayEquals(value1, xattrs.get(name1)); @@ -194,9 +203,7 @@ public class FSXAttrBaseTest { Assert.assertArrayEquals(val, xattrs.get(ent.getKey())); } - for (Map.Entry ent : expectedXAttrs.entrySet()) { - fs.removeXAttr(usePath, ent.getKey()); - } + fs.delete(usePath, false); } /** @@ -344,13 +351,13 @@ public class FSXAttrBaseTest { fs.removeXAttr(path, name3); // Name length exceeds max limit - String longName = "user.0123456789abcdefX"; + String longName = "user.0123456789abcdefX0123456789abcdefX0123456789abcdef"; try { fs.setXAttr(path, longName, null); Assert.fail("Setting xattr should fail if name is too long."); } catch (IOException e) { GenericTestUtils.assertExceptionContains("XAttr is too big", e); - GenericTestUtils.assertExceptionContains("total size is 17", e); + GenericTestUtils.assertExceptionContains("total size is 50", e); } // Value length exceeds max limit @@ -360,7 +367,7 @@ public class FSXAttrBaseTest { Assert.fail("Setting xattr should fail if value is too long."); } catch (IOException e) { GenericTestUtils.assertExceptionContains("XAttr is too big", e); - GenericTestUtils.assertExceptionContains("total size is 17", e); + GenericTestUtils.assertExceptionContains("total size is 38", e); } // Name + value exactly equal the limit @@ -1116,6 +1123,121 @@ public class FSXAttrBaseTest { } } + /** + * This tests the "unreadable by superuser" xattr which denies access to a + * file for the superuser. See HDFS-6705 for details. + */ + @Test(timeout = 120000) + public void testUnreadableBySuperuserXAttr() throws Exception { + // Run tests as superuser... + doTestUnreadableBySuperuserXAttr(fs, true); + + // ...and again as non-superuser + final UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] { "mygroup" }); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + doTestUnreadableBySuperuserXAttr(userFs, false); + return null; + } + }); + } + + private void doTestUnreadableBySuperuserXAttr(FileSystem userFs, + boolean expectOpenFailure) throws Exception { + + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0777)); + DFSTestUtil.createFile(userFs, filePath, 8192, (short) 1, 0xFEED); + try { + doTUBSXAInt(userFs, expectOpenFailure); + // Deleting the file is allowed. 
+ userFs.delete(filePath, false); + } finally { + fs.delete(path, true); + } + } + + private void doTUBSXAInt(FileSystem userFs, boolean expectOpenFailure) + throws Exception { + + // Test that xattr can't be set on a dir + try { + userFs.setXAttr(path, security1, null, EnumSet.of(XAttrSetFlag.CREATE)); + } catch (IOException e) { + // WebHDFS throws IOException instead of RemoteException + GenericTestUtils.assertExceptionContains("Can only set '" + + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file", e); + } + + // Test that xattr can actually be set. Repeatedly. + userFs.setXAttr(filePath, security1, null, + EnumSet.of(XAttrSetFlag.CREATE)); + verifySecurityXAttrExists(userFs); + userFs.setXAttr(filePath, security1, null, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + verifySecurityXAttrExists(userFs); + + // Test that the xattr can't be deleted by anyone. + try { + userFs.removeXAttr(filePath, security1); + Assert.fail("Removing security xattr should fail."); + } catch (AccessControlException e) { + GenericTestUtils.assertExceptionContains("The xattr '" + + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted.", e); + } + + // Test that xattr can be read. + verifySecurityXAttrExists(userFs); + + // Test that a value can't be set for the xattr. + try { + userFs.setXAttr(filePath, security1, + value1,EnumSet.of(XAttrSetFlag.REPLACE)); + fail("Should have thrown on attempt to set value"); + } catch (AccessControlException e) { + GenericTestUtils.assertExceptionContains("Values are not allowed", e); + } + + // Test that unreadable by superuser xattr appears in listXAttrs results + // (for superuser and non-superuser) + final List xattrNames = userFs.listXAttrs(filePath); + assertTrue(xattrNames.contains(security1)); + assertTrue(xattrNames.size() == 1); + + verifyFileAccess(userFs, expectOpenFailure); + + // Rename of the file is allowed by anyone. + Path toPath = new Path(filePath.toString() + "x"); + userFs.rename(filePath, toPath); + userFs.rename(toPath, filePath); + } + + private void verifySecurityXAttrExists(FileSystem userFs) throws Exception { + try { + final Map xattrs = userFs.getXAttrs(filePath); + Assert.assertEquals(1, xattrs.size()); + Assert.assertNotNull(xattrs.get(security1)); + Assert.assertArrayEquals("expected empty byte[] from getXAttr", + new byte[0], userFs.getXAttr(filePath, security1)); + + } catch (AccessControlException e) { + fail("getXAttrs failed but expected it to succeed"); + } + } + + private void verifyFileAccess(FileSystem userFs, boolean expectOpenFailure) + throws Exception { + // Test that a file with the xattr can or can't be opened. + try { + userFs.open(filePath); + assertFalse("open succeeded but expected it to fail", expectOpenFailure); + } catch (AccessControlException e) { + assertTrue("open failed but expected it to succeed", expectOpenFailure); + } + } + /** * Creates a FileSystem for the super-user. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml index 3414f5719dd..9c66cba8487 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml @@ -124,6 +124,79 @@ + + + setfattr : Add the unreadable by superuser xattr to security namespace + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n security.hdfs.unreadable.by.superuser /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + security.hdfs.unreadable.by.superuser + + + + + + setfattr : Try to delete the unreadable by superuser xattr from security namespace + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n security.hdfs.unreadable.by.superuser /file1 + -fs NAMENODE -setfattr -x security.hdfs.unreadable.by.superuser /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + can not be deleted + + + + + + setfattr : Try to read a file protected by the unreadable by superuser xattr + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n security.hdfs.unreadable.by.superuser /file1 + -fs NAMENODE -get /file1 /tmp/file1 + + + -fs NAMENODE -rm /file1 + rm /tmp/file1 + + + + SubstringComparator + Access is denied + + + + + + setfattr : Try to add a value to the unreadable by superuser xattr + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n security.hdfs.unreadable.by.superuser /file1 + -fs NAMENODE -setfattr -n security.hdfs.unreadable.by.superuser -v 1234 /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + Values are not allowed + + + setfattr : Add an xattr of raw namespace