diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3bf9d4baf5c..a1dca6611a3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -815,6 +815,17 @@ Release 2.6.0 - UNRELEASED
HADOOP-11088. Unittest TestKeyShell, TestCredShell and TestKMS assume UNIX
path separator for JECKS key store path. (Xiaoyu Yao via cnauroth)
+ HADOOP-11096. KMS: KeyAuthorizationKeyProvider should verify the keyversion
+ belongs to the keyname on decrypt. (tucu)
+
+ HADOOP-11097. kms docs say proxyusers, not proxyuser for config params.
+ (clamb via tucu)
+
+ HADOOP-11062. CryptoCodec testcases requiring OpenSSL should be run
+ only if -Pnative is used. (asuresh via tucu)
+
+ HADOOP-11099. KMS return HTTP UNAUTHORIZED 401 on ACL failure. (tucu)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index ae495be0e65..0183e292c8a 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -375,6 +375,7 @@
             <startKdc>${startKdc}</startKdc>
             <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
+            <runningWithNative>${runningWithNative}</runningWithNative>
@@ -507,6 +508,7 @@
false
+        <runningWithNative>true</runningWithNative>
@@ -626,6 +628,7 @@
false
+        <runningWithNative>true</runningWithNative>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
index fed7e9e4d97..968e341338b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
@@ -91,6 +91,8 @@ public class KeyProviderCryptoExtension extends
* returned EncryptedKeyVersion will only partially be populated; it is not
* necessarily suitable for operations besides decryption.
*
+ * @param keyName Key name of the encryption key use to encrypt the
+ * encrypted key.
* @param encryptionKeyVersionName Version name of the encryption key used
* to encrypt the encrypted key.
* @param encryptedKeyIv Initialization vector of the encrypted
@@ -100,12 +102,12 @@ public class KeyProviderCryptoExtension extends
* @param encryptedKeyMaterial Key material of the encrypted key.
* @return EncryptedKeyVersion suitable for decryption.
*/
- public static EncryptedKeyVersion createForDecryption(String
- encryptionKeyVersionName, byte[] encryptedKeyIv,
+ public static EncryptedKeyVersion createForDecryption(String keyName,
+ String encryptionKeyVersionName, byte[] encryptedKeyIv,
byte[] encryptedKeyMaterial) {
KeyVersion encryptedKeyVersion = new KeyVersion(null, EEK,
encryptedKeyMaterial);
- return new EncryptedKeyVersion(null, encryptionKeyVersionName,
+ return new EncryptedKeyVersion(keyName, encryptionKeyVersionName,
encryptedKeyIv, encryptedKeyVersion);
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
index 298f4ef8b0f..79987cec37c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
@@ -59,7 +59,14 @@ public class TestCryptoCodec {
@Test(timeout=120000)
public void testJceAesCtrCryptoCodec() throws Exception {
- Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl());
+ if (!"true".equalsIgnoreCase(System.getProperty("runningWithNative"))) {
+ LOG.warn("Skipping since test was not run with -Pnative flag");
+ Assume.assumeTrue(false);
+ }
+ if (!NativeCodeLoader.buildSupportsOpenssl()) {
+ LOG.warn("Skipping test since openSSL library not loaded");
+ Assume.assumeTrue(false);
+ }
Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
cryptoCodecTest(conf, seed, 0, jceCodecClass, jceCodecClass);
cryptoCodecTest(conf, seed, count, jceCodecClass, jceCodecClass);
@@ -68,7 +75,14 @@ public class TestCryptoCodec {
@Test(timeout=120000)
public void testOpensslAesCtrCryptoCodec() throws Exception {
- Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl());
+ if (!"true".equalsIgnoreCase(System.getProperty("runningWithNative"))) {
+ LOG.warn("Skipping since test was not run with -Pnative flag");
+ Assume.assumeTrue(false);
+ }
+ if (!NativeCodeLoader.buildSupportsOpenssl()) {
+ LOG.warn("Skipping test since openSSL library not loaded");
+ Assume.assumeTrue(false);
+ }
Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
cryptoCodecTest(conf, seed, 0, opensslCodecClass, opensslCodecClass);
cryptoCodecTest(conf, seed, count, opensslCodecClass, opensslCodecClass);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
index 70ec6feaf10..62e3310173d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
@@ -121,7 +121,7 @@ public class TestKeyProviderCryptoExtension {
// Test the createForDecryption factory method
EncryptedKeyVersion eek2 =
- EncryptedKeyVersion.createForDecryption(
+ EncryptedKeyVersion.createForDecryption(eek.getEncryptionKeyName(),
eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(),
eek.getEncryptedKeyVersion().getMaterial());
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
index 77b78ee783c..5cb088567c9 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
@@ -79,7 +79,7 @@ public class KMSExceptionsProvider implements ExceptionMapper {
// we don't audit here because we did it already when checking access
doAudit = false;
} else if (throwable instanceof AuthorizationException) {
- status = Response.Status.UNAUTHORIZED;
+ status = Response.Status.FORBIDDEN;
// we don't audit here because we did it already when checking access
doAudit = false;
} else if (throwable instanceof AccessControlException) {
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java
index fe908e38c94..bccec4aeee5 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java
@@ -192,9 +192,21 @@ public class KeyAuthorizationKeyProvider extends KeyProviderCryptoExtension {
return provider.generateEncryptedKey(encryptionKeyName);
}
+ private void verifyKeyVersionBelongsToKey(EncryptedKeyVersion ekv)
+ throws IOException {
+ String kn = ekv.getEncryptionKeyName();
+ String kvn = ekv.getEncryptionKeyVersionName();
+ KeyVersion kv = provider.getKeyVersion(kvn);
+ if (!kv.getName().equals(kn)) {
+ throw new IllegalArgumentException(String.format(
+ "KeyVersion '%s' does not belong to the key '%s'", kvn, kn));
+ }
+ }
+
@Override
public KeyVersion decryptEncryptedKey(EncryptedKeyVersion encryptedKeyVersion)
throws IOException, GeneralSecurityException {
+ verifyKeyVersionBelongsToKey(encryptedKeyVersion);
doAccessCheck(
encryptedKeyVersion.getEncryptionKeyName(), KeyOpType.DECRYPT_EEK);
return provider.decryptEncryptedKey(encryptedKeyVersion);
diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
index c76ca3b30ad..d70f2a6d62e 100644
--- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
@@ -197,22 +197,22 @@ hadoop-${project.version} $ sbin/kms.sh start
*** KMS Proxyuser Configuration
-  Each proxyusers must be configured in <<<etc/hadoop/kms-site.xml>>> using the
+  Each proxyuser must be configured in <<<etc/hadoop/kms-site.xml>>> using the
following properties:
+---+
- hadoop.kms.proxyusers.#USER#.users
+  hadoop.kms.proxyuser.#USER#.users
- hadoop.kms.proxyusers.#USER#.groups
+  hadoop.kms.proxyuser.#USER#.groups
- hadoop.kms.proxyusers.#USER#.hosts
+  hadoop.kms.proxyuser.#USER#.hosts
+---+
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java
index a79926a9cd3..1db3d706883 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java
@@ -215,4 +215,57 @@ public class TestKeyAuthorizationKeyProvider {
return options;
}
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testDecryptWithKeyVersionNameKeyMismatch() throws Exception {
+ final Configuration conf = new Configuration();
+ KeyProvider kp =
+ new UserProvider.Factory().createProvider(new URI("user:///"), conf);
+ KeyACLs mock = mock(KeyACLs.class);
+ when(mock.isACLPresent("testKey", KeyOpType.MANAGEMENT)).thenReturn(true);
+ when(mock.isACLPresent("testKey", KeyOpType.GENERATE_EEK)).thenReturn(true);
+ when(mock.isACLPresent("testKey", KeyOpType.DECRYPT_EEK)).thenReturn(true);
+ when(mock.isACLPresent("testKey", KeyOpType.ALL)).thenReturn(true);
+ UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1");
+ UserGroupInformation u2 = UserGroupInformation.createRemoteUser("u2");
+ UserGroupInformation u3 = UserGroupInformation.createRemoteUser("u3");
+ UserGroupInformation sudo = UserGroupInformation.createRemoteUser("sudo");
+ when(mock.hasAccessToKey("testKey", u1,
+ KeyOpType.MANAGEMENT)).thenReturn(true);
+ when(mock.hasAccessToKey("testKey", u2,
+ KeyOpType.GENERATE_EEK)).thenReturn(true);
+ when(mock.hasAccessToKey("testKey", u3,
+ KeyOpType.DECRYPT_EEK)).thenReturn(true);
+ when(mock.hasAccessToKey("testKey", sudo,
+ KeyOpType.ALL)).thenReturn(true);
+ final KeyProviderCryptoExtension kpExt =
+ new KeyAuthorizationKeyProvider(
+ KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp),
+ mock);
+
+ sudo.doAs(
+ new PrivilegedExceptionAction() {
+ @Override
+ public Void run() throws Exception {
+ Options opt = newOptions(conf);
+ Map m = new HashMap();
+ m.put("key.acl.name", "testKey");
+ opt.setAttributes(m);
+ KeyVersion kv =
+ kpExt.createKey("foo", SecureRandom.getSeed(16), opt);
+ kpExt.rollNewVersion(kv.getName());
+ kpExt.rollNewVersion(kv.getName(), SecureRandom.getSeed(16));
+ EncryptedKeyVersion ekv = kpExt.generateEncryptedKey(kv.getName());
+ ekv = EncryptedKeyVersion.createForDecryption(
+ ekv.getEncryptionKeyName() + "x",
+ ekv.getEncryptionKeyVersionName(),
+ ekv.getEncryptedKeyIv(),
+ ekv.getEncryptedKeyVersion().getMaterial());
+ kpExt.decryptEncryptedKey(ekv);
+ return null;
+ }
+ }
+ );
+ }
+
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 752e77869ca..567a6abb771 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -468,6 +468,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6851. Refactor EncryptionZoneWithId and EncryptionZone. (clamb via wang)
+ HDFS-6705. Create an XAttr that disallows the HDFS admin from accessing a
+ file. (clamb via wang)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index b1707fef000..dade77d7347 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -209,6 +209,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <startKdc>${startKdc}</startKdc>
             <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
+            <runningWithNative>${runningWithNative}</runningWithNative>
@@ -386,6 +387,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
windows
+      <properties>
+        <runningWithNative>true</runningWithNative>
+      </properties>
@@ -476,6 +480,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
false
+      <properties>
+        <runningWithNative>true</runningWithNative>
+      </properties>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 9da8efc7480..456fac63425 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1321,7 +1321,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
" an encrypted file");
}
EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
- feInfo.getEzKeyVersionName(), feInfo.getIV(),
+ //TODO: here we have to put the keyName to be provided by HDFS-6987
+ null, feInfo.getEzKeyVersionName(), feInfo.getIV(),
feInfo.getEncryptedDataEncryptionKey());
try {
return provider.decryptEncryptedKey(ekv);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 98c6398c2cb..106f4893d60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -299,5 +299,6 @@ public final class HdfsServerConstants {
"raw.hdfs.crypto.encryption.zone";
public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO =
"raw.hdfs.crypto.file.encryption.info";
+ public static final String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
+ "security.hdfs.unreadable.by.superuser";
}
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index bb29d53e278..bf6c25ece47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
import static org.apache.hadoop.util.Time.now;
import java.io.Closeable;
@@ -90,6 +91,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
+import org.apache.hadoop.security.AccessControlException;
/**
* Both FSDirectory and FSNamesystem manage the state of the namespace.
@@ -128,6 +130,8 @@ public class FSDirectory implements Closeable {
DFSUtil.string2Bytes(DOT_INODES_STRING);
private final XAttr KEYID_XATTR =
XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null);
+ private final XAttr UNREADABLE_BY_SUPERUSER_XATTR =
+ XAttrHelper.buildXAttr(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER, null);
INodeDirectory rootDir;
private final FSNamesystem namesystem;
@@ -2617,7 +2621,8 @@ public class FSDirectory implements Closeable {
*/
@VisibleForTesting
List filterINodeXAttrs(final List existingXAttrs,
- final List toFilter, final List filtered) {
+ final List toFilter, final List filtered)
+ throws AccessControlException {
if (existingXAttrs == null || existingXAttrs.isEmpty() ||
toFilter == null || toFilter.isEmpty()) {
return existingXAttrs;
@@ -2633,6 +2638,10 @@ public class FSDirectory implements Closeable {
XAttr filter = it.next();
Preconditions.checkArgument(!KEYID_XATTR.equalsIgnoreValue(filter),
"The encryption zone xattr should never be deleted.");
+ if (UNREADABLE_BY_SUPERUSER_XATTR.equalsIgnoreValue(filter)) {
+ throw new AccessControlException("The xattr '" +
+ SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted.");
+ }
if (a.equalsIgnoreValue(filter)) {
add = false;
it.remove();
@@ -2771,16 +2780,23 @@ public class FSDirectory implements Closeable {
int snapshotId = iip.getLatestSnapshotId();
List existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
List newXAttrs = setINodeXAttrs(existingXAttrs, xAttrs, flag);
+ final boolean isFile = inode.isFile();
- /*
- * If we're adding the encryption zone xattr, then add src to the list
- * of encryption zones.
- */
for (XAttr xattr : newXAttrs) {
final String xaName = XAttrHelper.getPrefixName(xattr);
+
+ /*
+ * If we're adding the encryption zone xattr, then add src to the list
+ * of encryption zones.
+ */
if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
ezManager.addEncryptionZone(inode.getId(), new String(xattr.getValue()));
}
+
+ if (!isFile && SECURITY_XATTR_UNREADABLE_BY_SUPERUSER.equals(xaName)) {
+ throw new IOException("Can only set '" +
+ SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file.");
+ }
}
XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
@@ -2871,12 +2887,26 @@ public class FSDirectory implements Closeable {
INodesInPath iip = getLastINodeInPath(srcs, true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getPathSnapshotId();
- return XAttrStorage.readINodeXAttrs(inode, snapshotId);
+ return unprotectedGetXAttrs(inode, snapshotId);
} finally {
readUnlock();
}
}
+ List getXAttrs(INode inode, int snapshotId) throws IOException {
+ readLock();
+ try {
+ return unprotectedGetXAttrs(inode, snapshotId);
+ } finally {
+ readUnlock();
+ }
+ }
+
+ private List unprotectedGetXAttrs(INode inode, int snapshotId)
+ throws IOException {
+ return XAttrStorage.readINodeXAttrs(inode, snapshotId);
+ }
+
private static INode resolveLastINode(String src, INodesInPath iip)
throws FileNotFoundException {
INode inode = iip.getLastINode();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index bbc53b9416f..fee0d7ebb5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -90,6 +90,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
import static org.apache.hadoop.util.Time.now;
import java.io.BufferedWriter;
@@ -168,6 +169,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.UnknownCipherSuiteException;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -1871,6 +1873,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final INodesInPath iip = dir.getLastINodeInPath(src);
final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
+ if (isPermissionEnabled) {
+ checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
+ }
if (!iip.isSnapshot() //snapshots are readonly, so don't update atime.
&& doAccessTime && isAccessTimeSupported()) {
final long now = now();
@@ -6251,6 +6256,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkPermission(pc, path, false, null, null, access, null);
}
+ private void checkUnreadableBySuperuser(FSPermissionChecker pc,
+ INode inode, int snapshotId)
+ throws IOException {
+ for (XAttr xattr : dir.getXAttrs(inode, snapshotId)) {
+ if (XAttrHelper.getPrefixName(xattr).
+ equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
+ if (pc.isSuperUser()) {
+ throw new AccessControlException("Access is denied for " +
+ pc.getUser() + " since the superuser is not allowed to " +
+ "perform this operation.");
+ }
+ }
+ }
+ }
+
private void checkParentAccess(FSPermissionChecker pc,
String path, FsAction access) throws AccessControlException,
UnresolvedLinkException {
@@ -9014,7 +9034,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
AccessControlException {
if (isPermissionEnabled && xAttr.getNameSpace() == XAttr.NameSpace.USER) {
final INode inode = dir.getINode(src);
- if (inode.isDirectory() && inode.getFsPermission().getStickyBit()) {
+ if (inode != null &&
+ inode.isDirectory() &&
+ inode.getFsPermission().getStickyBit()) {
if (!pc.isSuperUser()) {
checkOwner(pc, src);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index 237f9d3d5ee..79dabb33899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.security.AccessControlException;
import com.google.common.collect.Lists;
import com.google.common.base.Preconditions;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
+
/**
* There are four types of extended attributes defined by the
* following namespaces:
@@ -70,6 +72,15 @@ public class XAttrPermissionFilter {
isRawPath && isSuperUser) {
return;
}
+ if (XAttrHelper.getPrefixName(xAttr).
+ equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
+ if (xAttr.getValue() != null) {
+ throw new AccessControlException("Attempt to set a value for '" +
+ SECURITY_XATTR_UNREADABLE_BY_SUPERUSER +
+ "'. Values are not allowed for this xattr.");
+ }
+ return;
+ }
throw new AccessControlException("User doesn't have permission for xattr: "
+ XAttrHelper.getPrefixName(xAttr));
}
@@ -104,6 +115,9 @@ public class XAttrPermissionFilter {
} else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
isSuperUser && isRawPath) {
filteredXAttrs.add(xAttr);
+ } else if (XAttrHelper.getPrefixName(xAttr).
+ equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
+ filteredXAttrs.add(xAttr);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
index 0a99fe50ee6..109e988a863 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
@@ -38,7 +38,7 @@ Extended Attributes in HDFS
The <<>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods, and is reserved for implementing internal HDFS features.
-  The <<<security>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused.
+  The <<<security>>> namespace is reserved for internal HDFS use. This namespace is generally not accessible through userspace methods. One particular use of <<<security>>> is the <<<security.hdfs.unreadable.by.superuser>>> extended attribute. This xattr can only be set on files, and it will prevent the superuser from reading the file's contents. The superuser can still read and modify file metadata, such as the owner, permissions, etc. This xattr can be set and accessed by any user, assuming normal filesystem permissions. This xattr is also write-once, and cannot be removed once set. This xattr does not allow a value to be set.
The <<>> namespace is reserved for internal system attributes that sometimes need to be exposed. Like <<>> namespace attributes they are not visible to the user except when <<>>/<<>> is called on a file or directory in the <<>> HDFS directory hierarchy. These attributes can only be accessed by the superuser. An example of where <<>> namespace extended attributes are used is the <<>> utility. Encryption zone meta data is stored in <<>> extended attributes, so as long as the administrator uses <<>> pathnames in source and target, the encrypted files in the encryption zones are transparently copied.
@@ -82,7 +82,6 @@ Extended Attributes in HDFS
* {Configuration options}
-
HDFS supports extended attributes out of the box, without additional configuration. Administrators could potentially be interested in the options limiting the number of xattrs per inode and the size of xattrs, since xattrs increase the on-disk and in-memory space consumption of an inode.
* <<>>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 9c484006247..e21e34ca173 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -45,7 +45,9 @@ import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.fs.permission.FsAction.READ;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.junit.After;
@@ -64,14 +66,14 @@ import com.google.common.collect.Maps;
*/
public class FSXAttrBaseTest {
- private static final int MAX_SIZE = 16;
-
protected static MiniDFSCluster dfsCluster;
protected static Configuration conf;
private static int pathCount = 0;
protected static Path path;
+ protected static Path filePath;
protected static Path rawPath;
-
+ protected static Path rawFilePath;
+
// XAttrs
protected static final String name1 = "user.a1";
protected static final byte[] value1 = {0x31, 0x32, 0x33};
@@ -82,6 +84,10 @@ public class FSXAttrBaseTest {
protected static final String name4 = "user.a4";
protected static final String raw1 = "raw.a1";
protected static final String raw2 = "raw.a2";
+ protected static final String security1 =
+ SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
+
+ private static final int MAX_SIZE = security1.length();
protected FileSystem fs;
@@ -111,7 +117,9 @@ public class FSXAttrBaseTest {
public void setUp() throws Exception {
pathCount += 1;
path = new Path("/p" + pathCount);
+ filePath = new Path(path, "file");
rawPath = new Path("/.reserved/raw/p" + pathCount);
+ rawFilePath = new Path(rawPath, "file");
initFileSystem();
}
@@ -133,16 +141,17 @@ public class FSXAttrBaseTest {
Map expectedXAttrs = Maps.newHashMap();
expectedXAttrs.put(name1, value1);
expectedXAttrs.put(name2, null);
- doTestCreateXAttr(path, expectedXAttrs);
+ expectedXAttrs.put(security1, null);
+ doTestCreateXAttr(filePath, expectedXAttrs);
expectedXAttrs.put(raw1, value1);
- doTestCreateXAttr(rawPath, expectedXAttrs);
+ doTestCreateXAttr(rawFilePath, expectedXAttrs);
}
private void doTestCreateXAttr(Path usePath, Map expectedXAttrs) throws Exception {
- FileSystem.mkdirs(fs, usePath, FsPermission.createImmutable((short)0750));
+ DFSTestUtil.createFile(fs, usePath, 8192, (short) 1, 0xFEED);
fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
-
+
Map xattrs = fs.getXAttrs(usePath);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(value1, xattrs.get(name1));
@@ -194,9 +203,7 @@ public class FSXAttrBaseTest {
Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
}
- for (Map.Entry ent : expectedXAttrs.entrySet()) {
- fs.removeXAttr(usePath, ent.getKey());
- }
+ fs.delete(usePath, false);
}
/**
@@ -344,13 +351,13 @@ public class FSXAttrBaseTest {
fs.removeXAttr(path, name3);
// Name length exceeds max limit
- String longName = "user.0123456789abcdefX";
+ String longName = "user.0123456789abcdefX0123456789abcdefX0123456789abcdef";
try {
fs.setXAttr(path, longName, null);
Assert.fail("Setting xattr should fail if name is too long.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("XAttr is too big", e);
- GenericTestUtils.assertExceptionContains("total size is 17", e);
+ GenericTestUtils.assertExceptionContains("total size is 50", e);
}
// Value length exceeds max limit
@@ -360,7 +367,7 @@ public class FSXAttrBaseTest {
Assert.fail("Setting xattr should fail if value is too long.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("XAttr is too big", e);
- GenericTestUtils.assertExceptionContains("total size is 17", e);
+ GenericTestUtils.assertExceptionContains("total size is 38", e);
}
// Name + value exactly equal the limit
@@ -1116,6 +1123,121 @@ public class FSXAttrBaseTest {
}
}
+ /**
+ * This tests the "unreadable by superuser" xattr which denies access to a
+ * file for the superuser. See HDFS-6705 for details.
+ */
+ @Test(timeout = 120000)
+ public void testUnreadableBySuperuserXAttr() throws Exception {
+ // Run tests as superuser...
+ doTestUnreadableBySuperuserXAttr(fs, true);
+
+ // ...and again as non-superuser
+ final UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] { "mygroup" });
+ user.doAs(new PrivilegedExceptionAction