From 2244459186f82e9129c8a6783f213fc586da4add Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Wed, 26 Feb 2014 22:32:27 +0000 Subject: [PATCH] HDFS-4685. Merge HDFS ACLs to branch-2. Merging changes r1569870, r1570466, r1570655, r1571573, r1571745, r1572142, r1572189 and r1572190 from trunk to branch-2. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1572308 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/hadoop/fs/FileSystem.java | 87 +- .../apache/hadoop/fs/FilterFileSystem.java | 36 + .../apache/hadoop/fs/RawLocalFileSystem.java | 6 +- .../apache/hadoop/fs/permission/AclEntry.java | 301 ++++ .../hadoop/fs/permission/AclEntryScope.java | 42 + .../hadoop/fs/permission/AclEntryType.java | 58 + .../hadoop/fs/permission/AclStatus.java | 201 +++ .../apache/hadoop/fs/permission/FsAction.java | 21 +- .../hadoop/fs/permission/FsPermission.java | 10 +- .../apache/hadoop/fs/shell/AclCommands.java | 325 ++++ .../org/apache/hadoop/fs/shell/FsCommand.java | 1 + .../java/org/apache/hadoop/fs/shell/Ls.java | 56 +- .../hadoop/fs/viewfs/ChRootedFileSystem.java | 37 +- .../hadoop/fs/viewfs/ViewFileSystem.java | 50 +- .../src/site/apt/FileSystemShell.apt.vm | 71 + .../apache/hadoop/fs/TestHarFileSystem.java | 17 + .../apache/hadoop/fs/permission/TestAcl.java | 210 +++ .../fs/permission/TestFsPermission.java | 71 +- .../hadoop/fs/shell/TestAclCommands.java | 240 +++ .../fs/viewfs/TestChRootedFileSystem.java | 43 +- .../viewfs/TestViewFileSystemDelegation.java | 63 + hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../org/apache/hadoop/hdfs/DFSClient.java | 92 ++ .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 + .../hadoop/hdfs/DistributedFileSystem.java | 128 ++ .../hadoop/hdfs/protocol/AclException.java | 39 + .../hadoop/hdfs/protocol/ClientProtocol.java | 48 + .../hadoop/hdfs/protocol/LayoutVersion.java | 6 +- ...amenodeProtocolServerSideTranslatorPB.java | 99 ++ .../ClientNamenodeProtocolTranslatorPB.java | 81 + .../hadoop/hdfs/protocolPB/PBHelper.java | 94 +- .../hdfs/server/namenode/AclConfigFlag.java | 56 + .../hdfs/server/namenode/AclFeature.java | 43 + .../hdfs/server/namenode/AclStorage.java | 406 +++++ .../server/namenode/AclTransformation.java | 485 ++++++ .../hdfs/server/namenode/FSDirectory.java | 195 ++- .../hdfs/server/namenode/FSEditLog.java | 26 +- .../hdfs/server/namenode/FSEditLogLoader.java | 15 +- .../hdfs/server/namenode/FSEditLogOp.java | 204 ++- .../server/namenode/FSEditLogOpCodes.java | 1 + .../hdfs/server/namenode/FSImageFormat.java | 16 +- .../server/namenode/FSImageFormatPBINode.java | 99 +- .../namenode/FSImageFormatProtobuf.java | 5 +- .../hdfs/server/namenode/FSNamesystem.java | 128 +- .../server/namenode/FSPermissionChecker.java | 125 +- .../hadoop/hdfs/server/namenode/INode.java | 25 + .../hdfs/server/namenode/INodeAttributes.java | 13 +- .../hdfs/server/namenode/INodeDirectory.java | 26 +- .../namenode/INodeDirectoryAttributes.java | 9 +- .../hdfs/server/namenode/INodeFile.java | 14 +- .../server/namenode/INodeFileAttributes.java | 4 +- .../hdfs/server/namenode/INodeReference.java | 16 + .../namenode/INodeWithAdditionalFields.java | 39 + .../server/namenode/NameNodeRpcServer.java | 34 + .../server/namenode/ScopedAclEntries.java | 93 ++ .../snapshot/FSImageFormatPBSnapshot.java | 41 +- .../snapshot/INodeDirectorySnapshottable.java | 2 +- .../server/namenode/snapshot/Snapshot.java | 10 +- .../web/resources/NamenodeWebHdfsMethods.java | 50 +- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 42 + 
.../hadoop/hdfs/web/WebHdfsFileSystem.java | 52 + .../web/resources/AclPermissionParam.java | 71 + .../hadoop/hdfs/web/resources/GetOpParam.java | 1 + .../hadoop/hdfs/web/resources/PutOpParam.java | 6 + .../main/proto/ClientNamenodeProtocol.proto | 13 + .../hadoop-hdfs/src/main/proto/acl.proto | 112 ++ .../hadoop-hdfs/src/main/proto/fsimage.proto | 20 + .../src/main/resources/hdfs-default.xml | 10 + .../src/site/apt/HdfsPermissionsGuide.apt.vm | 176 ++- .../hadoop-hdfs/src/site/apt/WebHDFS.apt.vm | 205 +++ .../org/apache/hadoop/cli/TestAclCLI.java | 84 ++ .../hadoop/fs/permission/TestStickyBit.java | 386 +++-- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 4 + .../org/apache/hadoop/hdfs/TestSafeMode.java | 38 + .../hadoop/hdfs/protocolPB/TestPBHelper.java | 41 + .../hdfs/server/namenode/AclTestHelpers.java | 155 ++ .../hdfs/server/namenode/FSAclBaseTest.java | 1308 +++++++++++++++++ .../namenode/OfflineEditsViewerHelper.java | 1 + .../server/namenode/TestAclConfigFlag.java | 189 +++ .../namenode/TestAclTransformation.java | 1208 +++++++++++++++ .../server/namenode/TestFSImageWithAcl.java | 227 +++ .../namenode/TestFSPermissionChecker.java | 417 ++++++ .../hdfs/server/namenode/TestNameNodeAcl.java | 38 + .../namenode/TestNamenodeRetryCache.java | 1 + .../namenode/ha/TestRetryCacheWithHA.java | 1 + .../snapshot/TestAclWithSnapshot.java | 741 ++++++++++ .../org/apache/hadoop/hdfs/util/TestDiff.java | 3 +- .../apache/hadoop/hdfs/web/TestJsonUtil.java | 51 + .../hadoop/hdfs/web/TestWebHDFSAcl.java | 76 + .../hadoop/hdfs/web/resources/TestParam.java | 46 + .../security/TestPermissionSymlinks.java | 155 +- .../src/test/resources/editsStored | Bin 4401 -> 4329 bytes .../src/test/resources/editsStored.xml | 241 ++- .../src/test/resources/testAclCLI.xml | 976 ++++++++++++ 94 files changed, 11380 insertions(+), 433 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestAcl.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestAclCommands.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java create mode 100644 
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index b3f607417e3..098a4c1fb1e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -25,8 +25,6 @@ import java.net.URI; import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; @@ -40,7 +38,6 @@ import java.util.ServiceLoader; import java.util.Set; import java.util.Stack; import java.util.TreeSet; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; @@ -51,6 +48,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.io.Text; @@ -2270,6 +2269,88 @@ public abstract class FileSystem extends Configured implements Closeable { + " doesn't support deleteSnapshot"); } + /** + * Modifies ACL entries of files and directories. This method can add new ACL + * entries or modify the permissions on existing ACL entries. All existing + * ACL entries that are not specified in this call are retained without + * changes. (Modifications are merged into the current ACL.) 
+ * + * @param path Path to modify + * @param aclSpec List describing modifications + * @throws IOException if an ACL could not be modified + */ + public void modifyAclEntries(Path path, List aclSpec) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support modifyAclEntries"); + } + + /** + * Removes ACL entries from files and directories. Other ACL entries are + * retained. + * + * @param path Path to modify + * @param aclSpec List describing entries to remove + * @throws IOException if an ACL could not be modified + */ + public void removeAclEntries(Path path, List aclSpec) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeAclEntries"); + } + + /** + * Removes all default ACL entries from files and directories. + * + * @param path Path to modify + * @throws IOException if an ACL could not be modified + */ + public void removeDefaultAcl(Path path) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeDefaultAcl"); + } + + /** + * Removes all but the base ACL entries of files and directories. The entries + * for user, group, and others are retained for compatibility with permission + * bits. + * + * @param path Path to modify + * @throws IOException if an ACL could not be removed + */ + public void removeAcl(Path path) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeAcl"); + } + + /** + * Fully replaces ACL of files and directories, discarding all existing + * entries. + * + * @param path Path to modify + * @param aclSpec List describing modifications, must include entries + * for user, group, and others for compatibility with permission bits. + * @throws IOException if an ACL could not be modified + */ + public void setAcl(Path path, List aclSpec) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support setAcl"); + } + + /** + * Gets the ACL of a file or directory. 
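The methods above, together with getAclStatus (completed just below), form the public FileSystem ACL surface added by this patch. A minimal usage sketch follows; it is editorial, not part of the patch, and the class name, path, and user name are invented. It assumes fs.defaultFS points at an HDFS cluster with ACLs enabled on the NameNode (dfs.namenode.acls.enabled).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

import com.google.common.collect.Lists;

public class AclApiExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/user/alice/data");   // illustrative path

    // Merge a named-user entry into the existing ACL; other entries are retained.
    fs.modifyAclEntries(path, Lists.newArrayList(
        new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("bob")
            .setPermission(FsAction.READ)
            .build()));

    // Read the resulting ACL back; entries print in getfacl form, e.g. user:bob:r--.
    AclStatus status = fs.getAclStatus(path);
    for (AclEntry entry : status.getEntries()) {
      System.out.println(entry);
    }

    // Strip everything except the base entries implied by the permission bits.
    fs.removeAcl(path);
  }
}

AclEntry, AclStatus, and their builders are introduced later in this patch.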
+ * + * @param path Path to get + * @return AclStatus describing the ACL of the file or directory + * @throws IOException if an ACL could not be read + */ + public AclStatus getAclStatus(Path path) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getAclStatus"); + } + // making it volatile to be able to do a double checked locking private volatile static boolean FILE_SYSTEMS_LOADED = false; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index 3f7b7ed5877..d45ecbbf11a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -22,9 +22,13 @@ import java.io.*; import java.net.URI; import java.net.URISyntaxException; import java.util.EnumSet; +import java.util.List; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.Options.ChecksumOpt; @@ -507,4 +511,36 @@ public class FilterFileSystem extends FileSystem { throws IOException { fs.deleteSnapshot(path, snapshotName); } + + @Override + public void modifyAclEntries(Path path, List aclSpec) + throws IOException { + fs.modifyAclEntries(path, aclSpec); + } + + @Override + public void removeAclEntries(Path path, List aclSpec) + throws IOException { + fs.removeAclEntries(path, aclSpec); + } + + @Override + public void removeDefaultAcl(Path path) throws IOException { + fs.removeDefaultAcl(path); + } + + @Override + public void removeAcl(Path path) throws IOException { + fs.removeAcl(path); + } + + @Override + public void setAcl(Path path, List aclSpec) throws IOException { + fs.setAcl(path, aclSpec); + } + + @Override + public AclStatus getAclStatus(Path path) throws IOException { + return fs.getAclStatus(path); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index fa4970bff01..4a3ebbdf265 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -563,8 +563,10 @@ public class RawLocalFileSystem extends FileSystem { //expected format //-rw------- 1 username groupname ... 
String permission = t.nextToken(); - if (permission.length() > 10) { //files with ACLs might have a '+' - permission = permission.substring(0, 10); + if (permission.length() > FsPermission.MAX_PERMISSION_LENGTH) { + //files with ACLs might have a '+' + permission = permission.substring(0, + FsPermission.MAX_PERMISSION_LENGTH); } setPermission(FsPermission.valueOf(permission)); t.nextToken(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java new file mode 100644 index 00000000000..e50be00528b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java @@ -0,0 +1,301 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import com.google.common.base.Objects; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.StringUtils; + +/** + * Defines a single entry in an ACL. An ACL entry has a type (user, group, + * mask, or other), an optional name (referring to a specific user or group), a + * set of permissions (any combination of read, write and execute), and a scope + * (access or default). AclEntry instances are immutable. Use a {@link Builder} + * to create a new instance. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class AclEntry { + private final AclEntryType type; + private final String name; + private final FsAction permission; + private final AclEntryScope scope; + + /** + * Returns the ACL entry type. + * + * @return AclEntryType ACL entry type + */ + public AclEntryType getType() { + return type; + } + + /** + * Returns the optional ACL entry name. + * + * @return String ACL entry name, or null if undefined + */ + public String getName() { + return name; + } + + /** + * Returns the set of permissions in the ACL entry. + * + * @return FsAction set of permissions in the ACL entry + */ + public FsAction getPermission() { + return permission; + } + + /** + * Returns the scope of the ACL entry. 
+ * + * @return AclEntryScope scope of the ACL entry + */ + public AclEntryScope getScope() { + return scope; + } + + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (getClass() != o.getClass()) { + return false; + } + AclEntry other = (AclEntry)o; + return Objects.equal(type, other.type) && + Objects.equal(name, other.name) && + Objects.equal(permission, other.permission) && + Objects.equal(scope, other.scope); + } + + @Override + public int hashCode() { + return Objects.hashCode(type, name, permission, scope); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + if (scope == AclEntryScope.DEFAULT) { + sb.append("default:"); + } + if (type != null) { + sb.append(type.toString().toLowerCase()); + } + sb.append(':'); + if (name != null) { + sb.append(name); + } + sb.append(':'); + if (permission != null) { + sb.append(permission.SYMBOL); + } + return sb.toString(); + } + + /** + * Builder for creating new AclEntry instances. + */ + public static class Builder { + private AclEntryType type; + private String name; + private FsAction permission; + private AclEntryScope scope = AclEntryScope.ACCESS; + + /** + * Sets the ACL entry type. + * + * @param type AclEntryType ACL entry type + * @return Builder this builder, for call chaining + */ + public Builder setType(AclEntryType type) { + this.type = type; + return this; + } + + /** + * Sets the optional ACL entry name. + * + * @param name String optional ACL entry name + * @return Builder this builder, for call chaining + */ + public Builder setName(String name) { + this.name = name; + return this; + } + + /** + * Sets the set of permissions in the ACL entry. + * + * @param permission FsAction set of permissions in the ACL entry + * @return Builder this builder, for call chaining + */ + public Builder setPermission(FsAction permission) { + this.permission = permission; + return this; + } + + /** + * Sets the scope of the ACL entry. If this method is not called, then the + * builder assumes {@link AclEntryScope#ACCESS}. + * + * @param scope AclEntryScope scope of the ACL entry + * @return Builder this builder, for call chaining + */ + public Builder setScope(AclEntryScope scope) { + this.scope = scope; + return this; + } + + /** + * Builds a new AclEntry populated with the set properties. + * + * @return AclEntry new AclEntry + */ + public AclEntry build() { + return new AclEntry(type, name, permission, scope); + } + } + + /** + * Private constructor. + * + * @param type AclEntryType ACL entry type + * @param name String optional ACL entry name + * @param permission FsAction set of permissions in the ACL entry + * @param scope AclEntryScope scope of the ACL entry + */ + private AclEntry(AclEntryType type, String name, FsAction permission, AclEntryScope scope) { + this.type = type; + this.name = name; + this.permission = permission; + this.scope = scope; + } + + /** + * Parses a string representation of an ACL spec into a list of AclEntry + * objects. Example: "user::rwx,user:foo:rw-,group::r--,other::---" + * + * @param aclSpec + * String representation of an ACL spec. + * @param includePermission + * for setAcl operations this will be true. i.e. AclSpec should + * include permissions.
+ * But for removeAcl operations it will be false, i.e. the AclSpec should + * not contain permissions.
+ * Example: "user:foo,group:bar" + * @return Returns list of {@link AclEntry} parsed + */ + public static List parseAclSpec(String aclSpec, + boolean includePermission) { + List aclEntries = new ArrayList(); + Collection aclStrings = StringUtils.getStringCollection(aclSpec, + ","); + for (String aclStr : aclStrings) { + AclEntry aclEntry = parseAclEntry(aclStr, includePermission); + aclEntries.add(aclEntry); + } + return aclEntries; + } + + /** + * Parses a string representation of an ACL into a AclEntry object.
+ * + * @param aclStr + * String representation of an ACL.
+ * Example: "user:foo:rw-" + * @param includePermission + * for setAcl operations this will be true. i.e. Acl should include + * permissions.
+ * But for removeAcl operations it will be false, i.e. the ACL string should + * not contain permissions.
+ * Example: "user:foo,group:bar,mask::" + * @return Returns an {@link AclEntry} object + */ + public static AclEntry parseAclEntry(String aclStr, + boolean includePermission) { + AclEntry.Builder builder = new AclEntry.Builder(); + // Here "::" represent one empty string. + // StringUtils.getStringCollection() will ignore this. + String[] split = aclStr.split(":"); + + if (split.length == 0) { + throw new HadoopIllegalArgumentException("Invalid : " + aclStr); + } + int index = 0; + if ("default".equals(split[0])) { + // default entry + index++; + builder.setScope(AclEntryScope.DEFAULT); + } + + if (split.length <= index) { + throw new HadoopIllegalArgumentException("Invalid : " + aclStr); + } + + AclEntryType aclType = null; + try { + aclType = Enum.valueOf(AclEntryType.class, split[index].toUpperCase()); + builder.setType(aclType); + index++; + } catch (IllegalArgumentException iae) { + throw new HadoopIllegalArgumentException( + "Invalid type of acl in :" + aclStr); + } + + if (split.length > index) { + String name = split[index]; + if (!name.isEmpty()) { + builder.setName(name); + } + index++; + } + + if (includePermission) { + if (split.length < index) { + throw new HadoopIllegalArgumentException("Invalid : " + + aclStr); + } + String permission = split[index]; + FsAction fsAction = FsAction.getFsAction(permission); + if (null == fsAction) { + throw new HadoopIllegalArgumentException( + "Invalid permission in : " + aclStr); + } + builder.setPermission(fsAction); + index++; + } + + if (split.length > index) { + throw new HadoopIllegalArgumentException("Invalid : " + aclStr); + } + AclEntry aclEntry = builder.build(); + return aclEntry; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java new file mode 100644 index 00000000000..6d941e7117d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Specifies the scope or intended usage of an ACL entry. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public enum AclEntryScope { + /** + * An ACL entry that is inspected during permission checks to enforce + * permissions. + */ + ACCESS, + + /** + * An ACL entry to be applied to a directory's children that do not otherwise + * have their own ACL defined. 
Unlike an access ACL entry, a default ACL + * entry is not inspected as part of permission enforcement on the directory + * that owns it. + */ + DEFAULT; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java new file mode 100644 index 00000000000..ffd62d7080b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Specifies the type of an ACL entry. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public enum AclEntryType { + /** + * An ACL entry applied to a specific user. These ACL entries can be unnamed, + * which applies to the file owner, or named, which applies to the specific + * named user. + */ + USER, + + /** + * An ACL entry applied to a specific group. These ACL entries can be + * unnamed, which applies to the file's group, or named, which applies to the + * specific named group. + */ + GROUP, + + /** + * An ACL mask entry. Mask entries are unnamed. During permission checks, + * the mask entry interacts with all ACL entries that are members of the group + * class. This consists of all named user entries, the unnamed group entry, + * and all named group entries. For each such entry, any permissions that are + * absent from the mask entry are removed from the effective permissions used + * during the permission check. + */ + MASK, + + /** + * An ACL entry that applies to all other users that were not covered by one + * of the more specific ACL entry types. + */ + OTHER; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java new file mode 100644 index 00000000000..4a7258f0a27 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java @@ -0,0 +1,201 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import com.google.common.base.Objects; +import com.google.common.collect.Lists; + +/** + * An AclStatus contains the ACL information of a specific file. AclStatus + * instances are immutable. Use a {@link Builder} to create a new instance. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class AclStatus { + private final String owner; + private final String group; + private final boolean stickyBit; + private final List entries; + + /** + * Returns the file owner. + * + * @return String file owner + */ + public String getOwner() { + return owner; + } + + /** + * Returns the file group. + * + * @return String file group + */ + public String getGroup() { + return group; + } + + /** + * Returns the sticky bit. + * + * @return boolean sticky bit + */ + public boolean isStickyBit() { + return stickyBit; + } + + /** + * Returns the list of all ACL entries, ordered by their natural ordering. + * + * @return List unmodifiable ordered list of all ACL entries + */ + public List getEntries() { + return entries; + } + + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (getClass() != o.getClass()) { + return false; + } + AclStatus other = (AclStatus)o; + return Objects.equal(owner, other.owner) + && Objects.equal(group, other.group) + && stickyBit == other.stickyBit + && Objects.equal(entries, other.entries); + } + + @Override + public int hashCode() { + return Objects.hashCode(owner, group, stickyBit, entries); + } + + @Override + public String toString() { + return new StringBuilder() + .append("owner: ").append(owner) + .append(", group: ").append(group) + .append(", acl: {") + .append("entries: ").append(entries) + .append(", stickyBit: ").append(stickyBit) + .append('}') + .toString(); + } + + /** + * Builder for creating new Acl instances. + */ + public static class Builder { + private String owner; + private String group; + private boolean stickyBit; + private List entries = Lists.newArrayList(); + + /** + * Sets the file owner. + * + * @param owner String file owner + * @return Builder this builder, for call chaining + */ + public Builder owner(String owner) { + this.owner = owner; + return this; + } + + /** + * Sets the file group. + * + * @param group String file group + * @return Builder this builder, for call chaining + */ + public Builder group(String group) { + this.group = group; + return this; + } + + /** + * Adds an ACL entry. + * + * @param e AclEntry entry to add + * @return Builder this builder, for call chaining + */ + public Builder addEntry(AclEntry e) { + this.entries.add(e); + return this; + } + + /** + * Adds a list of ACL entries. + * + * @param entries AclEntry entries to add + * @return Builder this builder, for call chaining + */ + public Builder addEntries(Iterable entries) { + for (AclEntry e : entries) + this.entries.add(e); + return this; + } + + /** + * Sets sticky bit. 
If this method is not called, then the builder assumes + * false. + * + * @param stickyBit + * boolean sticky bit + * @return Builder this builder, for call chaining + */ + public Builder stickyBit(boolean stickyBit) { + this.stickyBit = stickyBit; + return this; + } + + /** + * Builds a new AclStatus populated with the set properties. + * + * @return AclStatus new AclStatus + */ + public AclStatus build() { + return new AclStatus(owner, group, stickyBit, entries); + } + } + + /** + * Private constructor. + * + * @param file Path file associated to this ACL + * @param owner String file owner + * @param group String file group + * @param stickyBit the sticky bit + * @param entries the ACL entries + */ + private AclStatus(String owner, String group, boolean stickyBit, + Iterable entries) { + this.owner = owner; + this.group = group; + this.stickyBit = stickyBit; + this.entries = Lists.newArrayList(entries); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsAction.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsAction.java index 6fac6472434..97dcf816c16 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsAction.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsAction.java @@ -23,8 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability; /** * File system actions, e.g. read, write, etc. */ -@InterfaceAudience.LimitedPrivate({"HDFS"}) -@InterfaceStability.Unstable +@InterfaceAudience.Public +@InterfaceStability.Stable public enum FsAction { // POSIX style NONE("---"), @@ -69,4 +69,21 @@ public enum FsAction { public FsAction not() { return vals[7 - ordinal()]; } + + /** + * Get the FsAction enum for String representation of permissions + * + * @param permission + * 3-character string representation of permission. ex: rwx + * @return Returns FsAction enum if the corresponding FsAction exists for permission. + * Otherwise returns null + */ + public static FsAction getFsAction(String permission) { + for (FsAction fsAction : vals) { + if (fsAction.SYMBOL.equals(permission)) { + return fsAction; + } + } + return null; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java index 76950305ed0..28956098c79 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java @@ -48,6 +48,9 @@ public class FsPermission implements Writable { WritableFactories.setFactory(ImmutableFsPermission.class, FACTORY); } + /** Maximum acceptable length of a permission string to parse */ + public static final int MAX_PERMISSION_LENGTH = 10; + /** Create an immutable {@link FsPermission} object. 
*/ public static FsPermission createImmutable(short permission) { return new ImmutableFsPermission(permission); @@ -319,9 +322,10 @@ public class FsPermission implements Writable { if (unixSymbolicPermission == null) { return null; } - else if (unixSymbolicPermission.length() != 10) { - throw new IllegalArgumentException("length != 10(unixSymbolicPermission=" - + unixSymbolicPermission + ")"); + else if (unixSymbolicPermission.length() != MAX_PERMISSION_LENGTH) { + throw new IllegalArgumentException(String.format( + "length != %d(unixSymbolicPermission=%s)", MAX_PERMISSION_LENGTH, + unixSymbolicPermission)); } int n = 0; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java new file mode 100644 index 00000000000..f17457cafde --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java @@ -0,0 +1,325 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.shell; + +import java.io.IOException; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; + +import com.google.common.collect.Lists; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; + +/** + * Acl related operations + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +class AclCommands extends FsCommand { + private static String GET_FACL = "getfacl"; + private static String SET_FACL = "setfacl"; + + public static void registerCommands(CommandFactory factory) { + factory.addClass(GetfaclCommand.class, "-" + GET_FACL); + factory.addClass(SetfaclCommand.class, "-" + SET_FACL); + } + + /** + * Implementing the '-getfacl' command for the the FsShell. + */ + public static class GetfaclCommand extends FsCommand { + public static String NAME = GET_FACL; + public static String USAGE = "[-R] "; + public static String DESCRIPTION = "Displays the Access Control Lists" + + " (ACLs) of files and directories. 
If a directory has a default ACL," + + " then getfacl also displays the default ACL.\n" + + "-R: List the ACLs of all files and directories recursively.\n" + + ": File or directory to list.\n"; + + @Override + protected void processOptions(LinkedList args) throws IOException { + CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R"); + cf.parse(args); + setRecursive(cf.getOpt("R")); + if (args.isEmpty()) { + throw new HadoopIllegalArgumentException(" is missing"); + } + if (args.size() > 1) { + throw new HadoopIllegalArgumentException("Too many arguments"); + } + } + + @Override + protected void processPath(PathData item) throws IOException { + AclStatus aclStatus = item.fs.getAclStatus(item.path); + out.println("# file: " + item); + out.println("# owner: " + aclStatus.getOwner()); + out.println("# group: " + aclStatus.getGroup()); + List entries = aclStatus.getEntries(); + if (aclStatus.isStickyBit()) { + String stickyFlag = "T"; + for (AclEntry aclEntry : entries) { + if (aclEntry.getType() == AclEntryType.OTHER + && aclEntry.getScope() == AclEntryScope.ACCESS + && aclEntry.getPermission().implies(FsAction.EXECUTE)) { + stickyFlag = "t"; + break; + } + } + out.println("# flags: --" + stickyFlag); + } + + FsPermission perm = item.stat.getPermission(); + if (entries.isEmpty()) { + printMinimalAcl(perm); + } else { + printExtendedAcl(perm, entries); + } + + out.println(); + } + + /** + * Prints an extended ACL, including all extended ACL entries and also the + * base entries implied by the permission bits. + * + * @param perm FsPermission of file + * @param entries List containing ACL entries of file + */ + private void printExtendedAcl(FsPermission perm, List entries) { + // Print owner entry implied by owner permission bits. + out.println(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .setPermission(perm.getUserAction()) + .build()); + + // Print all extended access ACL entries. + boolean hasAccessAcl = false; + Iterator entryIter = entries.iterator(); + AclEntry curEntry = null; + while (entryIter.hasNext()) { + curEntry = entryIter.next(); + if (curEntry.getScope() == AclEntryScope.DEFAULT) { + break; + } + hasAccessAcl = true; + printExtendedAclEntry(curEntry, perm.getGroupAction()); + } + + // Print mask entry implied by group permission bits, or print group entry + // if there is no access ACL (only default ACL). + out.println(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP) + .setPermission(perm.getGroupAction()) + .build()); + + // Print other entry implied by other bits. + out.println(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER) + .setPermission(perm.getOtherAction()) + .build()); + + // Print default ACL entries. + if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) { + out.println(curEntry); + // ACL sort order guarantees default mask is the second-to-last entry. + FsAction maskPerm = entries.get(entries.size() - 2).getPermission(); + while (entryIter.hasNext()) { + printExtendedAclEntry(entryIter.next(), maskPerm); + } + } + } + + /** + * Prints a single extended ACL entry. If the mask restricts the + * permissions of the entry, then also prints the restricted version as the + * effective permissions. The mask applies to all named entries and also + * the unnamed group entry. 
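As a small worked illustration of the effective-permission rule described here (values invented), the mask reduces an entry's permissions through a bitwise AND of the two FsAction values:

FsAction entryPerm = FsAction.ALL;             // from an entry such as user:bob:rwx
FsAction maskPerm = FsAction.READ_EXECUTE;     // from mask::r-x
FsAction effective = entryPerm.and(maskPerm);  // FsAction.READ_EXECUTE
// Since effective differs from entryPerm, getfacl prints: user:bob:rwx   #effective:r-x

The printExtendedAclEntry implementation below applies exactly this reduction before printing.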
+ * + * @param entry AclEntry extended ACL entry to print + * @param maskPerm FsAction permissions in the ACL's mask entry + */ + private void printExtendedAclEntry(AclEntry entry, FsAction maskPerm) { + if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) { + FsAction entryPerm = entry.getPermission(); + FsAction effectivePerm = entryPerm.and(maskPerm); + if (entryPerm != effectivePerm) { + out.println(String.format("%s\t#effective:%s", entry, + effectivePerm.SYMBOL)); + } else { + out.println(entry); + } + } else { + out.println(entry); + } + } + + /** + * Prints a minimal ACL, consisting of exactly 3 ACL entries implied by the + * permission bits. + * + * @param perm FsPermission of file + */ + private void printMinimalAcl(FsPermission perm) { + out.println(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .setPermission(perm.getUserAction()) + .build()); + out.println(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.GROUP) + .setPermission(perm.getGroupAction()) + .build()); + out.println(new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER) + .setPermission(perm.getOtherAction()) + .build()); + } + } + + /** + * Implementing the '-setfacl' command for the the FsShell. + */ + public static class SetfaclCommand extends FsCommand { + public static String NAME = SET_FACL; + public static String USAGE = "[-R] [{-b|-k} {-m|-x } ]" + + "|[--set ]"; + public static String DESCRIPTION = "Sets Access Control Lists (ACLs)" + + " of files and directories.\n" + + "Options:\n" + + "-b :Remove all but the base ACL entries. The entries for user," + + " group and others are retained for compatibility with permission " + + "bits.\n" + + "-k :Remove the default ACL.\n" + + "-R :Apply operations to all files and directories recursively.\n" + + "-m :Modify ACL. New entries are added to the ACL, and existing" + + " entries are retained.\n" + + "-x :Remove specified ACL entries. Other ACL entries are retained.\n" + + "--set :Fully replace the ACL, discarding all existing entries." 
+ + " The must include entries for user, group, and others" + + " for compatibility with permission bits.\n" + + ": Comma separated list of ACL entries.\n" + + ": File or directory to modify.\n"; + + CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "b", "k", "R", + "m", "x", "-set"); + List aclEntries = null; + List accessAclEntries = null; + + @Override + protected void processOptions(LinkedList args) throws IOException { + cf.parse(args); + setRecursive(cf.getOpt("R")); + // Mix of remove and modify acl flags are not allowed + boolean bothRemoveOptions = cf.getOpt("b") && cf.getOpt("k"); + boolean bothModifyOptions = cf.getOpt("m") && cf.getOpt("x"); + boolean oneRemoveOption = cf.getOpt("b") || cf.getOpt("k"); + boolean oneModifyOption = cf.getOpt("m") || cf.getOpt("x"); + boolean setOption = cf.getOpt("-set"); + if ((bothRemoveOptions || bothModifyOptions) + || (oneRemoveOption && oneModifyOption) + || (setOption && (oneRemoveOption || oneModifyOption))) { + throw new HadoopIllegalArgumentException( + "Specified flags contains both remove and modify flags"); + } + + // Only -m, -x and --set expects + if (oneModifyOption || setOption) { + if (args.size() < 2) { + throw new HadoopIllegalArgumentException(" is missing"); + } + aclEntries = AclEntry.parseAclSpec(args.removeFirst(), !cf.getOpt("x")); + } + + if (args.isEmpty()) { + throw new HadoopIllegalArgumentException(" is missing"); + } + if (args.size() > 1) { + throw new HadoopIllegalArgumentException("Too many arguments"); + } + + // In recursive mode, save a separate list of just the access ACL entries. + // Only directories may have a default ACL. When a recursive operation + // encounters a file under the specified path, it must pass only the + // access ACL entries. + if (isRecursive() && (oneModifyOption || setOption)) { + accessAclEntries = Lists.newArrayList(); + for (AclEntry entry: aclEntries) { + if (entry.getScope() == AclEntryScope.ACCESS) { + accessAclEntries.add(entry); + } + } + } + } + + @Override + protected void processPath(PathData item) throws IOException { + if (cf.getOpt("b")) { + item.fs.removeAcl(item.path); + } else if (cf.getOpt("k")) { + item.fs.removeDefaultAcl(item.path); + } else if (cf.getOpt("m")) { + List entries = getAclEntries(item); + if (!entries.isEmpty()) { + item.fs.modifyAclEntries(item.path, entries); + } + } else if (cf.getOpt("x")) { + List entries = getAclEntries(item); + if (!entries.isEmpty()) { + item.fs.removeAclEntries(item.path, entries); + } + } else if (cf.getOpt("-set")) { + List entries = getAclEntries(item); + if (!entries.isEmpty()) { + item.fs.setAcl(item.path, entries); + } + } + } + + /** + * Returns the ACL entries to use in the API call for the given path. For a + * recursive operation, returns all specified ACL entries if the item is a + * directory or just the access ACL entries if the item is a file. For a + * non-recursive operation, returns all specified ACL entries. + * + * @param item PathData path to check + * @return List ACL entries to use in the API call + */ + private List getAclEntries(PathData item) { + if (isRecursive()) { + return item.stat.isDirectory() ? 
aclEntries : accessAclEntries; + } else { + return aclEntries; + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java index 78b47dc8860..c4a6d80754d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java @@ -43,6 +43,7 @@ abstract public class FsCommand extends Command { * @param factory where to register the class */ public static void registerCommands(CommandFactory factory) { + factory.registerCommands(AclCommands.class); factory.registerCommands(CopyCommands.class); factory.registerCommands(Count.class); factory.registerCommands(Delete.class); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java index 76192e73b4a..a41be7ef2a3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java @@ -19,15 +19,22 @@ package org.apache.hadoop.fs.shell; import java.io.IOException; +import java.net.URI; import java.text.SimpleDateFormat; import java.util.Date; import java.util.LinkedList; +import java.util.Set; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.RpcNoSuchMethodException; + +import com.google.common.collect.Sets; /** * Get a listing of all files in that match the file patterns. @@ -65,6 +72,8 @@ class Ls extends FsCommand { protected boolean dirRecurse; protected boolean humanReadable = false; + private Set aclNotSupportedFsSet = Sets.newHashSet(); + protected String formatSize(long size) { return humanReadable ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1) @@ -107,7 +116,7 @@ class Ls extends FsCommand { FileStatus stat = item.stat; String line = String.format(lineFormat, (stat.isDirectory() ? "d" : "-"), - stat.getPermission(), + stat.getPermission() + (hasAcl(item) ? "+" : " "), (stat.isFile() ? stat.getReplication() : "-"), stat.getOwner(), stat.getGroup(), @@ -132,7 +141,7 @@ class Ls extends FsCommand { } StringBuilder fmt = new StringBuilder(); - fmt.append("%s%s "); // permission string + fmt.append("%s%s"); // permission string fmt.append("%" + maxRepl + "s "); // Do not use '%-0s' as a formatting conversion, since it will throw a // a MissingFormatWidthException if it is used in String.format(). @@ -144,6 +153,49 @@ class Ls extends FsCommand { lineFormat = fmt.toString(); } + /** + * Calls getAclStatus to determine if the given item has an ACL. For + * compatibility, this method traps errors caused by the RPC method missing + * from the server side. This would happen if the client was connected to an + * old NameNode that didn't have the ACL APIs. This method also traps the + * case of the client-side FileSystem not implementing the ACL APIs. + * FileSystem instances that do not support ACLs are remembered. This + * prevents the client from sending multiple failing RPC calls during a + * recursive ls. 
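Applications outside FsShell that probe ACLs against clusters of mixed vintage can apply the same guard sketched in this Javadoc. A condensed, hypothetical fragment (fs and path are assumed to exist; the exception handling mirrors the method body that follows):

boolean hasExtendedAcl = false;
try {
  hasExtendedAcl = !fs.getAclStatus(path).getEntries().isEmpty();
} catch (RemoteException e) {
  if (!(e.unwrapRemoteException(RpcNoSuchMethodException.class)
      instanceof RpcNoSuchMethodException)) {
    throw e;  // a genuine failure rather than a missing RPC method
  }
  // Older NameNode without the ACL RPCs: treat as having no ACL.
} catch (UnsupportedOperationException e) {
  // Client-side FileSystem that does not implement the ACL API: treat as having no ACL.
}

The FsShell version below additionally tolerates the "ACLs has been disabled" IOException and caches the URIs of FileSystems that lack ACL support.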
+ * + * @param item PathData item to check + * @return boolean true if item has an ACL + * @throws IOException if there is a failure + */ + private boolean hasAcl(PathData item) throws IOException { + FileSystem fs = item.fs; + if (aclNotSupportedFsSet.contains(fs.getUri())) { + // This FileSystem failed to run the ACL API in an earlier iteration. + return false; + } + try { + return !fs.getAclStatus(item.path).getEntries().isEmpty(); + } catch (RemoteException e) { + // If this is a RpcNoSuchMethodException, then the client is connected to + // an older NameNode that doesn't support ACLs. Keep going. + IOException e2 = e.unwrapRemoteException(RpcNoSuchMethodException.class); + if (!(e2 instanceof RpcNoSuchMethodException)) { + throw e; + } + } catch (IOException e) { + // The NameNode supports ACLs, but they are not enabled. Keep going. + String message = e.getMessage(); + if (message != null && !message.contains("ACLs has been disabled")) { + throw e; + } + } catch (UnsupportedOperationException e) { + // The underlying FileSystem doesn't implement ACLs. Keep going. + } + // Remember that this FileSystem cannot support ACLs. + aclNotSupportedFsSet.add(fs.getUri()); + return false; + } + private int maxLength(int n, Object value) { return Math.max(n, (value != null) ? String.valueOf(value).length() : 0); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java index b73d3c65195..0d3be9bac30 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java @@ -20,6 +20,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.util.EnumSet; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -36,6 +37,8 @@ import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Progressable; @@ -277,7 +280,39 @@ class ChRootedFileSystem extends FilterFileSystem { throws IOException { super.setTimes(fullPath(f), mtime, atime); } - + + @Override + public void modifyAclEntries(Path path, List aclSpec) + throws IOException { + super.modifyAclEntries(fullPath(path), aclSpec); + } + + @Override + public void removeAclEntries(Path path, List aclSpec) + throws IOException { + super.removeAclEntries(fullPath(path), aclSpec); + } + + @Override + public void removeDefaultAcl(Path path) throws IOException { + super.removeDefaultAcl(fullPath(path)); + } + + @Override + public void removeAcl(Path path) throws IOException { + super.removeAcl(fullPath(path)); + } + + @Override + public void setAcl(Path path, List aclSpec) throws IOException { + super.setAcl(fullPath(path), aclSpec); + } + + @Override + public AclStatus getAclStatus(Path path) throws IOException { + return super.getAclStatus(fullPath(path)); + } + @Override public Path resolvePath(final Path p) throws IOException { return super.resolvePath(fullPath(p)); diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index aec87a34c04..ac2664effdb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -28,7 +28,6 @@ import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.StringTokenizer; import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience; @@ -45,9 +44,10 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; -import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; @@ -473,6 +473,52 @@ public class ViewFileSystem extends FileSystem { res.targetFileSystem.setTimes(res.remainingPath, mtime, atime); } + @Override + public void modifyAclEntries(Path path, List aclSpec) + throws IOException { + InodeTree.ResolveResult res = fsState.resolve(getUriPath(path), + true); + res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec); + } + + @Override + public void removeAclEntries(Path path, List aclSpec) + throws IOException { + InodeTree.ResolveResult res = fsState.resolve(getUriPath(path), + true); + res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec); + } + + @Override + public void removeDefaultAcl(Path path) + throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.removeDefaultAcl(res.remainingPath); + } + + @Override + public void removeAcl(Path path) + throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.removeAcl(res.remainingPath); + } + + @Override + public void setAcl(Path path, List aclSpec) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.setAcl(res.remainingPath, aclSpec); + } + + @Override + public AclStatus getAclStatus(Path path) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + return res.targetFileSystem.getAclStatus(res.remainingPath); + } + @Override public void setVerifyChecksum(final boolean verifyChecksum) { List> mountPoints = diff --git a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm index 53ef0cabef4..18a361d42f6 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm @@ -231,6 +231,29 @@ get Returns 0 on success and -1 on error. +getfacl + + Usage: << >>> + + Displays the Access Control Lists (ACLs) of files and directories. If a + directory has a default ACL, then getfacl also displays the default ACL. + + Options: + + * -R: List the ACLs of all files and directories recursively. + + * : File or directory to list. 
+ + Examples: + + * <<>> + + * <<>> + + Exit Code: + + Returns 0 on success and non-zero on error. + getmerge Usage: << [addnl]>>> @@ -379,6 +402,54 @@ rmr Returns 0 on success and -1 on error. +setfacl + + Usage: <<} ]|[--set ] >>> + + Sets Access Control Lists (ACLs) of files and directories. + + Options: + + * -b: Remove all but the base ACL entries. The entries for user, group and + others are retained for compatibility with permission bits. + + * -k: Remove the default ACL. + + * -R: Apply operations to all files and directories recursively. + + * -m: Modify ACL. New entries are added to the ACL, and existing entries + are retained. + + * -x: Remove specified ACL entries. Other ACL entries are retained. + + * --set: Fully replace the ACL, discarding all existing entries. The + must include entries for user, group, and others for + compatibility with permission bits. + + * : Comma separated list of ACL entries. + + * : File or directory to modify. + + Examples: + + * <<>> + + * <<>> + + * <<>> + + * <<>> + + * <<>> + + * <<>> + + * <<>> + + Exit Code: + + Returns 0 on success and non-zero on error. + setrep Usage: << >>> diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java index a9c0da8d4e1..200e9f27261 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java @@ -21,6 +21,8 @@ package org.apache.hadoop.fs; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; @@ -33,6 +35,7 @@ import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.util.EnumSet; import java.util.Iterator; +import java.util.List; import static org.apache.hadoop.fs.Options.ChecksumOpt; import static org.apache.hadoop.fs.Options.CreateOpts; @@ -165,6 +168,20 @@ public class TestHarFileSystem { String snapshotNewName) throws IOException; public void deleteSnapshot(Path path, String snapshotName) throws IOException; + + public void modifyAclEntries(Path path, List aclSpec) + throws IOException; + + public void removeAclEntries(Path path, List aclSpec) + throws IOException; + + public void removeDefaultAcl(Path path) throws IOException; + + public void removeAcl(Path path) throws IOException; + + public void setAcl(Path path, List aclSpec) throws IOException; + + public AclStatus getAclStatus(Path path) throws IOException; } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestAcl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestAcl.java new file mode 100644 index 00000000000..f33da8aa8be --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestAcl.java @@ -0,0 +1,210 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
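As a companion to the getfacl/setfacl documentation above, the same commands can be driven programmatically through FsShell and ToolRunner, which is also how the shell tests added later in this patch invoke them. A minimal sketch; the path and ACL spec below are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class AclShellSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent of: hdfs dfs -setfacl -m user:user1:rwx /tmp/file1
    int rc = ToolRunner.run(conf, new FsShell(),
        new String[] { "-setfacl", "-m", "user:user1:rwx", "/tmp/file1" });
    if (rc == 0) {
      // Equivalent of: hdfs dfs -getfacl /tmp/file1 (prints owner, group and ACL entries)
      rc = ToolRunner.run(conf, new FsShell(), new String[] { "-getfacl", "/tmp/file1" });
    }
    System.exit(rc);
  }
}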
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.permission; + +import static org.junit.Assert.*; + +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests covering basic functionality of the ACL objects. + */ +public class TestAcl { + private static AclEntry ENTRY1, ENTRY2, ENTRY3, ENTRY4, ENTRY5, ENTRY6, + ENTRY7, ENTRY8, ENTRY9, ENTRY10, ENTRY11, ENTRY12, ENTRY13; + private static AclStatus STATUS1, STATUS2, STATUS3, STATUS4; + + @BeforeClass + public static void setUp() { + // named user + AclEntry.Builder aclEntryBuilder = new AclEntry.Builder() + .setType(AclEntryType.USER) + .setName("user1") + .setPermission(FsAction.ALL); + ENTRY1 = aclEntryBuilder.build(); + ENTRY2 = aclEntryBuilder.build(); + // named group + ENTRY3 = new AclEntry.Builder() + .setType(AclEntryType.GROUP) + .setName("group2") + .setPermission(FsAction.READ_WRITE) + .build(); + // default other + ENTRY4 = new AclEntry.Builder() + .setType(AclEntryType.OTHER) + .setPermission(FsAction.NONE) + .setScope(AclEntryScope.DEFAULT) + .build(); + // owner + ENTRY5 = new AclEntry.Builder() + .setType(AclEntryType.USER) + .setPermission(FsAction.ALL) + .build(); + // default named group + ENTRY6 = new AclEntry.Builder() + .setType(AclEntryType.GROUP) + .setName("group3") + .setPermission(FsAction.READ_WRITE) + .setScope(AclEntryScope.DEFAULT) + .build(); + // other + ENTRY7 = new AclEntry.Builder() + .setType(AclEntryType.OTHER) + .setPermission(FsAction.NONE) + .build(); + // default named user + ENTRY8 = new AclEntry.Builder() + .setType(AclEntryType.USER) + .setName("user3") + .setPermission(FsAction.ALL) + .setScope(AclEntryScope.DEFAULT) + .build(); + // mask + ENTRY9 = new AclEntry.Builder() + .setType(AclEntryType.MASK) + .setPermission(FsAction.READ) + .build(); + // default mask + ENTRY10 = new AclEntry.Builder() + .setType(AclEntryType.MASK) + .setPermission(FsAction.READ_EXECUTE) + .setScope(AclEntryScope.DEFAULT) + .build(); + // group + ENTRY11 = new AclEntry.Builder() + .setType(AclEntryType.GROUP) + .setPermission(FsAction.READ) + .build(); + // default group + ENTRY12 = new AclEntry.Builder() + .setType(AclEntryType.GROUP) + .setPermission(FsAction.READ) + .setScope(AclEntryScope.DEFAULT) + .build(); + // default owner + ENTRY13 = new AclEntry.Builder() + .setType(AclEntryType.USER) + .setPermission(FsAction.ALL) + .setScope(AclEntryScope.DEFAULT) + .build(); + + AclStatus.Builder aclStatusBuilder = new AclStatus.Builder() + .owner("owner1") + .group("group1") + .addEntry(ENTRY1) + .addEntry(ENTRY3) + .addEntry(ENTRY4); + STATUS1 = aclStatusBuilder.build(); + STATUS2 = aclStatusBuilder.build(); + STATUS3 = new AclStatus.Builder() + .owner("owner2") + .group("group2") + .stickyBit(true) + .build(); + + STATUS4 = new AclStatus.Builder() + .addEntry(ENTRY1) + .addEntry(ENTRY3) + .addEntry(ENTRY4) + .addEntry(ENTRY5) + .addEntry(ENTRY6) + .addEntry(ENTRY7) + .addEntry(ENTRY8) + .addEntry(ENTRY9) + .addEntry(ENTRY10) + 
.addEntry(ENTRY11) + .addEntry(ENTRY12) + .addEntry(ENTRY13) + .build(); + } + + @Test + public void testEntryEquals() { + assertNotSame(ENTRY1, ENTRY2); + assertNotSame(ENTRY1, ENTRY3); + assertNotSame(ENTRY1, ENTRY4); + assertNotSame(ENTRY2, ENTRY3); + assertNotSame(ENTRY2, ENTRY4); + assertNotSame(ENTRY3, ENTRY4); + assertEquals(ENTRY1, ENTRY1); + assertEquals(ENTRY2, ENTRY2); + assertEquals(ENTRY1, ENTRY2); + assertEquals(ENTRY2, ENTRY1); + assertFalse(ENTRY1.equals(ENTRY3)); + assertFalse(ENTRY1.equals(ENTRY4)); + assertFalse(ENTRY3.equals(ENTRY4)); + assertFalse(ENTRY1.equals(null)); + assertFalse(ENTRY1.equals(new Object())); + } + + @Test + public void testEntryHashCode() { + assertEquals(ENTRY1.hashCode(), ENTRY2.hashCode()); + assertFalse(ENTRY1.hashCode() == ENTRY3.hashCode()); + assertFalse(ENTRY1.hashCode() == ENTRY4.hashCode()); + assertFalse(ENTRY3.hashCode() == ENTRY4.hashCode()); + } + + @Test + public void testEntryScopeIsAccessIfUnspecified() { + assertEquals(AclEntryScope.ACCESS, ENTRY1.getScope()); + assertEquals(AclEntryScope.ACCESS, ENTRY2.getScope()); + assertEquals(AclEntryScope.ACCESS, ENTRY3.getScope()); + assertEquals(AclEntryScope.DEFAULT, ENTRY4.getScope()); + } + + @Test + public void testStatusEquals() { + assertNotSame(STATUS1, STATUS2); + assertNotSame(STATUS1, STATUS3); + assertNotSame(STATUS2, STATUS3); + assertEquals(STATUS1, STATUS1); + assertEquals(STATUS2, STATUS2); + assertEquals(STATUS1, STATUS2); + assertEquals(STATUS2, STATUS1); + assertFalse(STATUS1.equals(STATUS3)); + assertFalse(STATUS2.equals(STATUS3)); + assertFalse(STATUS1.equals(null)); + assertFalse(STATUS1.equals(new Object())); + } + + @Test + public void testStatusHashCode() { + assertEquals(STATUS1.hashCode(), STATUS2.hashCode()); + assertFalse(STATUS1.hashCode() == STATUS3.hashCode()); + } + + @Test + public void testToString() { + assertEquals("user:user1:rwx", ENTRY1.toString()); + assertEquals("user:user1:rwx", ENTRY2.toString()); + assertEquals("group:group2:rw-", ENTRY3.toString()); + assertEquals("default:other::---", ENTRY4.toString()); + + assertEquals( + "owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}", + STATUS1.toString()); + assertEquals( + "owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}", + STATUS2.toString()); + assertEquals( + "owner: owner2, group: group2, acl: {entries: [], stickyBit: true}", + STATUS3.toString()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java index e46fb659f35..45d6e1aceaa 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java @@ -54,7 +54,7 @@ public class TestFsPermission extends TestCase { * the expected values back out for all combinations */ public void testConvertingPermissions() { - for(short s = 0; s < 01777; s++) { + for(short s = 0; s <= 01777; s++) { assertEquals(s, new FsPermission(s).toShort()); } @@ -64,10 +64,12 @@ public class TestFsPermission extends TestCase { for(FsAction u : FsAction.values()) { for(FsAction g : FsAction.values()) { for(FsAction o : FsAction.values()) { + // Cover constructor with sticky bit. 
FsPermission f = new FsPermission(u, g, o, sb); assertEquals(s, f.toShort()); FsPermission f2 = new FsPermission(f); assertEquals(s, f2.toShort()); + s++; } } @@ -75,48 +77,57 @@ public class TestFsPermission extends TestCase { } } - public void testStickyBitToString() { - // Check that every permission has its sticky bit represented correctly - for(boolean sb : new boolean [] { false, true }) { - for(FsAction u : FsAction.values()) { - for(FsAction g : FsAction.values()) { - for(FsAction o : FsAction.values()) { + public void testSpecialBitsToString() { + for (boolean sb : new boolean[] { false, true }) { + for (FsAction u : FsAction.values()) { + for (FsAction g : FsAction.values()) { + for (FsAction o : FsAction.values()) { FsPermission f = new FsPermission(u, g, o, sb); - if(f.getStickyBit() && f.getOtherAction().implies(EXECUTE)) - assertEquals('t', f.toString().charAt(8)); - else if(f.getStickyBit() && !f.getOtherAction().implies(EXECUTE)) - assertEquals('T', f.toString().charAt(8)); - else if(!f.getStickyBit() && f.getOtherAction().implies(EXECUTE)) - assertEquals('x', f.toString().charAt(8)); + String fString = f.toString(); + + // Check that sticky bit is represented correctly. + if (f.getStickyBit() && f.getOtherAction().implies(EXECUTE)) + assertEquals('t', fString.charAt(8)); + else if (f.getStickyBit() && !f.getOtherAction().implies(EXECUTE)) + assertEquals('T', fString.charAt(8)); + else if (!f.getStickyBit() && f.getOtherAction().implies(EXECUTE)) + assertEquals('x', fString.charAt(8)); else - assertEquals('-', f.toString().charAt(8)); + assertEquals('-', fString.charAt(8)); + + assertEquals(9, fString.length()); } } + } } } public void testFsPermission() { - String symbolic = "-rwxrwxrwx"; - StringBuilder b = new StringBuilder("-123456789"); + String symbolic = "-rwxrwxrwx"; - for(int i = 0; i < (1<<9); i++) { - for(int j = 1; j < 10; j++) { - b.setCharAt(j, '-'); + for(int i = 0; i < (1 << 10); i++) { + StringBuilder b = new StringBuilder("----------"); + String binary = String.format("%11s", Integer.toBinaryString(i)); + String permBinary = binary.substring(2, binary.length()); + + int len = permBinary.length(); + for(int j = 0; j < len; j++) { + if (permBinary.charAt(j) == '1') { + int k = 9 - (len - 1 - j); + b.setCharAt(k, symbolic.charAt(k)); } - String binary = Integer.toBinaryString(i); - - int len = binary.length(); - for(int j = 0; j < len; j++) { - if (binary.charAt(j) == '1') { - int k = 9 - (len - 1 - j); - b.setCharAt(k, symbolic.charAt(k)); - } - } - - assertEquals(i, FsPermission.valueOf(b.toString()).toShort()); } + + // Check for sticky bit. + if (binary.charAt(1) == '1') { + char replacement = b.charAt(9) == 'x' ? 't' : 'T'; + b.setCharAt(9, replacement); + } + + assertEquals(i, FsPermission.valueOf(b.toString()).toShort()); } + } public void testUMaskParser() throws IOException { Configuration conf = new Configuration(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestAclCommands.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestAclCommands.java new file mode 100644 index 00000000000..b14cd37c49d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestAclCommands.java @@ -0,0 +1,240 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.shell; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.RpcNoSuchMethodException; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Before; +import org.junit.Test; + +public class TestAclCommands { + + private Configuration conf = null; + + @Before + public void setup() throws IOException { + conf = new Configuration(); + } + + @Test + public void testGetfaclValidations() throws Exception { + assertFalse("getfacl should fail without path", + 0 == runCommand(new String[] { "-getfacl" })); + assertFalse("getfacl should fail with extra argument", + 0 == runCommand(new String[] { "-getfacl", "/test", "extraArg" })); + } + + @Test + public void testSetfaclValidations() throws Exception { + assertFalse("setfacl should fail without path", + 0 == runCommand(new String[] { "-setfacl" })); + assertFalse("setfacl should fail without aclSpec", + 0 == runCommand(new String[] { "-setfacl", "-m", "/path" })); + assertFalse("setfacl should fail with conflicting options", + 0 == runCommand(new String[] { "-setfacl", "-m", "/path" })); + assertFalse("setfacl should fail with extra arguments", + 0 == runCommand(new String[] { "-setfacl", "/path", "extra" })); + assertFalse("setfacl should fail with extra arguments", + 0 == runCommand(new String[] { "-setfacl", "--set", + "default:user::rwx", "/path", "extra" })); + assertFalse("setfacl should fail with permissions for -x", + 0 == runCommand(new String[] { "-setfacl", "-x", "user:user1:rwx", + "/path" })); + assertFalse("setfacl should fail ACL spec missing", + 0 == runCommand(new String[] { "-setfacl", "-m", + "", "/path" })); + } + + @Test + public void testMultipleAclSpecParsing() throws Exception { + List parsedList = AclEntry.parseAclSpec( + "group::rwx,user:user1:rwx,user:user2:rw-," + + "group:group1:rw-,default:group:group1:rw-", true); + + AclEntry basicAcl = new AclEntry.Builder().setType(AclEntryType.GROUP) + .setPermission(FsAction.ALL).build(); + AclEntry user1Acl = new AclEntry.Builder().setType(AclEntryType.USER) + 
.setPermission(FsAction.ALL).setName("user1").build(); + AclEntry user2Acl = new AclEntry.Builder().setType(AclEntryType.USER) + .setPermission(FsAction.READ_WRITE).setName("user2").build(); + AclEntry group1Acl = new AclEntry.Builder().setType(AclEntryType.GROUP) + .setPermission(FsAction.READ_WRITE).setName("group1").build(); + AclEntry defaultAcl = new AclEntry.Builder().setType(AclEntryType.GROUP) + .setPermission(FsAction.READ_WRITE).setName("group1") + .setScope(AclEntryScope.DEFAULT).build(); + List expectedList = new ArrayList(); + expectedList.add(basicAcl); + expectedList.add(user1Acl); + expectedList.add(user2Acl); + expectedList.add(group1Acl); + expectedList.add(defaultAcl); + assertEquals("Parsed Acl not correct", expectedList, parsedList); + } + + @Test + public void testMultipleAclSpecParsingWithoutPermissions() throws Exception { + List parsedList = AclEntry.parseAclSpec( + "user::,user:user1:,group::,group:group1:,mask::,other::," + + "default:user:user1::,default:mask::", false); + + AclEntry owner = new AclEntry.Builder().setType(AclEntryType.USER).build(); + AclEntry namedUser = new AclEntry.Builder().setType(AclEntryType.USER) + .setName("user1").build(); + AclEntry group = new AclEntry.Builder().setType(AclEntryType.GROUP).build(); + AclEntry namedGroup = new AclEntry.Builder().setType(AclEntryType.GROUP) + .setName("group1").build(); + AclEntry mask = new AclEntry.Builder().setType(AclEntryType.MASK).build(); + AclEntry other = new AclEntry.Builder().setType(AclEntryType.OTHER).build(); + AclEntry defaultUser = new AclEntry.Builder() + .setScope(AclEntryScope.DEFAULT).setType(AclEntryType.USER) + .setName("user1").build(); + AclEntry defaultMask = new AclEntry.Builder() + .setScope(AclEntryScope.DEFAULT).setType(AclEntryType.MASK).build(); + List expectedList = new ArrayList(); + expectedList.add(owner); + expectedList.add(namedUser); + expectedList.add(group); + expectedList.add(namedGroup); + expectedList.add(mask); + expectedList.add(other); + expectedList.add(defaultUser); + expectedList.add(defaultMask); + assertEquals("Parsed Acl not correct", expectedList, parsedList); + } + + @Test + public void testLsNoRpcForGetAclStatus() throws Exception { + Configuration conf = new Configuration(); + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///"); + conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class); + conf.setBoolean("stubfs.noRpcForGetAclStatus", true); + assertEquals("ls must succeed even if getAclStatus RPC does not exist.", + 0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" })); + } + + @Test + public void testLsAclsUnsupported() throws Exception { + Configuration conf = new Configuration(); + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///"); + conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class); + assertEquals("ls must succeed even if FileSystem does not implement ACLs.", + 0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" })); + } + + public static class StubFileSystem extends FileSystem { + + public FSDataOutputStream append(Path f, int bufferSize, + Progressable progress) throws IOException { + return null; + } + + public FSDataOutputStream create(Path f, FsPermission permission, + boolean overwrite, int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + return null; + } + + @Override + public boolean delete(Path f, boolean recursive) throws IOException { + return false; + } + + public AclStatus 
getAclStatus(Path path) throws IOException { + if (getConf().getBoolean("stubfs.noRpcForGetAclStatus", false)) { + throw new RemoteException(RpcNoSuchMethodException.class.getName(), + "test exception"); + } + return super.getAclStatus(path); + } + + @Override + public FileStatus getFileStatus(Path f) throws IOException { + if (f.isRoot()) { + return new FileStatus(0, true, 0, 0, 0, f); + } + return null; + } + + @Override + public URI getUri() { + return URI.create("stubfs:///"); + } + + @Override + public Path getWorkingDirectory() { + return null; + } + + @Override + public FileStatus[] listStatus(Path f) throws IOException { + FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, + FsAction.READ_EXECUTE); + Path path = new Path("/foo"); + FileStatus stat = new FileStatus(1000, true, 3, 1000, 0, 0, perm, "owner", + "group", path); + return new FileStatus[] { stat }; + } + + @Override + public boolean mkdirs(Path f, FsPermission permission) + throws IOException { + return false; + } + + @Override + public FSDataInputStream open(Path f, int bufferSize) throws IOException { + return null; + } + + @Override + public boolean rename(Path src, Path dst) throws IOException { + return false; + } + + @Override + public void setWorkingDirectory(Path dir) { + } + } + + private int runCommand(String[] commands) throws Exception { + return ToolRunner.run(conf, new FsShell(), commands); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java index 8454025ba3d..e8d4656858d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java @@ -20,6 +20,8 @@ package org.apache.hadoop.fs.viewfs; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; +import java.util.Collections; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -29,6 +31,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.viewfs.ChRootedFileSystem; import org.junit.After; import org.junit.Assert; @@ -354,6 +357,44 @@ public class TestChRootedFileSystem { new ChRootedFileSystem(chrootUri, conf); } + /** + * Tests that ChRootedFileSystem delegates calls for every ACL method to the + * underlying FileSystem with all Path arguments translated as required to + * enforce chroot. 
+ */ + @Test + public void testAclMethodsPathTranslation() throws IOException { + Configuration conf = new Configuration(); + conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class); + + URI chrootUri = URI.create("mockfs://foo/a/b"); + ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf); + FileSystem mockFs = ((FilterFileSystem)chrootFs.getRawFileSystem()) + .getRawFileSystem(); + + Path chrootPath = new Path("/c"); + Path rawPath = new Path("/a/b/c"); + List entries = Collections.emptyList(); + + chrootFs.modifyAclEntries(chrootPath, entries); + verify(mockFs).modifyAclEntries(rawPath, entries); + + chrootFs.removeAclEntries(chrootPath, entries); + verify(mockFs).removeAclEntries(rawPath, entries); + + chrootFs.removeDefaultAcl(chrootPath); + verify(mockFs).removeDefaultAcl(rawPath); + + chrootFs.removeAcl(chrootPath); + verify(mockFs).removeAcl(rawPath); + + chrootFs.setAcl(chrootPath, entries); + verify(mockFs).setAcl(rawPath, entries); + + chrootFs.getAclStatus(chrootPath); + verify(mockFs).getAclStatus(rawPath); + } + static class MockFileSystem extends FilterFileSystem { MockFileSystem() { super(mock(FileSystem.class)); @@ -361,4 +402,4 @@ public class TestChRootedFileSystem { @Override public void initialize(URI name, Configuration conf) throws IOException {} } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java index 487ce91a078..cb1f413bda4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java @@ -20,14 +20,19 @@ package org.apache.hadoop.fs.viewfs; import java.io.IOException; import java.net.URI; +import java.util.Collections; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.viewfs.TestChRootedFileSystem.MockFileSystem; import org.junit.*; import static org.junit.Assert.*; +import static org.mockito.Mockito.*; /** * Verify that viewfs propagates certain methods to the underlying fs @@ -57,6 +62,15 @@ public class TestViewFileSystemDelegation { //extends ViewFileSystemTestSetup { return fs; } + private static FileSystem setupMockFileSystem(Configuration conf, URI uri) + throws Exception { + String scheme = uri.getScheme(); + conf.set("fs." + scheme + ".impl", MockFileSystem.class.getName()); + FileSystem fs = FileSystem.get(uri, conf); + ConfigUtil.addLink(conf, "/mounts/" + scheme, uri); + return ((MockFileSystem)fs).getRawFileSystem(); + } + @Test public void testSanity() { assertEquals("fs1:/", fs1.getUri().toString()); @@ -69,6 +83,55 @@ public class TestViewFileSystemDelegation { //extends ViewFileSystemTestSetup { checkVerifyChecksum(true); } + /** + * Tests that ViewFileSystem dispatches calls for every ACL method through the + * mount table to the correct underlying FileSystem with all Path arguments + * translated as required. 
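For context on what these delegation tests exercise, here is a short client-side sketch of the new ACL API as used through a viewfs mount; the mount path and entry values are illustrative, not taken from the patch:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

import com.google.common.collect.Lists;

public class ViewFsAclSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/mounts/data/file1"); // hypothetical path under a mount point

    // Add an access ACL entry granting a named user read/write.
    List<AclEntry> aclSpec = Lists.newArrayList(
        new AclEntry.Builder()
            .setType(AclEntryType.USER)
            .setName("user1")
            .setPermission(FsAction.READ_WRITE)
            .build());
    fs.modifyAclEntries(path, aclSpec);

    // Read the ACL back; viewfs resolves the mount point and forwards the
    // call to the backing file system with the remaining path.
    AclStatus status = fs.getAclStatus(path);
    System.out.println(status);
  }
}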
+ */ + @Test + public void testAclMethods() throws Exception { + Configuration conf = ViewFileSystemTestSetup.createConfig(); + FileSystem mockFs1 = setupMockFileSystem(conf, new URI("mockfs1:/")); + FileSystem mockFs2 = setupMockFileSystem(conf, new URI("mockfs2:/")); + FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf); + + Path viewFsPath1 = new Path("/mounts/mockfs1/a/b/c"); + Path mockFsPath1 = new Path("/a/b/c"); + Path viewFsPath2 = new Path("/mounts/mockfs2/d/e/f"); + Path mockFsPath2 = new Path("/d/e/f"); + List entries = Collections.emptyList(); + + viewFs.modifyAclEntries(viewFsPath1, entries); + verify(mockFs1).modifyAclEntries(mockFsPath1, entries); + viewFs.modifyAclEntries(viewFsPath2, entries); + verify(mockFs2).modifyAclEntries(mockFsPath2, entries); + + viewFs.removeAclEntries(viewFsPath1, entries); + verify(mockFs1).removeAclEntries(mockFsPath1, entries); + viewFs.removeAclEntries(viewFsPath2, entries); + verify(mockFs2).removeAclEntries(mockFsPath2, entries); + + viewFs.removeDefaultAcl(viewFsPath1); + verify(mockFs1).removeDefaultAcl(mockFsPath1); + viewFs.removeDefaultAcl(viewFsPath2); + verify(mockFs2).removeDefaultAcl(mockFsPath2); + + viewFs.removeAcl(viewFsPath1); + verify(mockFs1).removeAcl(mockFsPath1); + viewFs.removeAcl(viewFsPath2); + verify(mockFs2).removeAcl(mockFsPath2); + + viewFs.setAcl(viewFsPath1, entries); + verify(mockFs1).setAcl(mockFsPath1, entries); + viewFs.setAcl(viewFsPath2, entries); + verify(mockFs2).setAcl(mockFsPath2, entries); + + viewFs.getAclStatus(viewFsPath1); + verify(mockFs1).getAclStatus(mockFsPath1); + viewFs.getAclStatus(viewFsPath2); + verify(mockFs2).getAclStatus(mockFsPath2); + } + void checkVerifyChecksum(boolean flag) { viewFs.setVerifyChecksum(flag); assertEquals(flag, fs1.getVerifyChecksum()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 232bbc86c6e..570646cbf3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -477,6 +477,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ClientNamenodeProtocol.proto NamenodeProtocol.proto + acl.proto ${project.build.directory}/generated-sources/java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 985dc57544f..dbf379fa9d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -115,9 +115,12 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.VolumeId; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; @@ -2641,6 +2644,95 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory { return clientContext; } + void modifyAclEntries(String src, List aclSpec) + throws IOException { + checkOpen(); + try { + namenode.modifyAclEntries(src, aclSpec); + } 
catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + AclException.class, + FileNotFoundException.class, + NSQuotaExceededException.class, + SafeModeException.class, + SnapshotAccessControlException.class, + UnresolvedPathException.class); + } + } + + void removeAclEntries(String src, List aclSpec) + throws IOException { + checkOpen(); + try { + namenode.removeAclEntries(src, aclSpec); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + AclException.class, + FileNotFoundException.class, + NSQuotaExceededException.class, + SafeModeException.class, + SnapshotAccessControlException.class, + UnresolvedPathException.class); + } + } + + void removeDefaultAcl(String src) throws IOException { + checkOpen(); + try { + namenode.removeDefaultAcl(src); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + AclException.class, + FileNotFoundException.class, + NSQuotaExceededException.class, + SafeModeException.class, + SnapshotAccessControlException.class, + UnresolvedPathException.class); + } + } + + void removeAcl(String src) throws IOException { + checkOpen(); + try { + namenode.removeAcl(src); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + AclException.class, + FileNotFoundException.class, + NSQuotaExceededException.class, + SafeModeException.class, + SnapshotAccessControlException.class, + UnresolvedPathException.class); + } + } + + void setAcl(String src, List aclSpec) throws IOException { + checkOpen(); + try { + namenode.setAcl(src, aclSpec); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + AclException.class, + FileNotFoundException.class, + NSQuotaExceededException.class, + SafeModeException.class, + SnapshotAccessControlException.class, + UnresolvedPathException.class); + } + } + + AclStatus getAclStatus(String src) throws IOException { + checkOpen(); + try { + return namenode.getAclStatus(src); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + AclException.class, + FileNotFoundException.class, + UnresolvedPathException.class); + } + } + @Override // RemotePeerFactory public Peer newConnectedPeer(InetSocketAddress addr) throws IOException { Peer peer = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 40410d56b99..c66a2bdecfb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -72,6 +72,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT = 3; public static final String DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH = "dfs.client.retry.interval-ms.get-last-block-length"; public static final int DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT = 4000; + public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT = + "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$"; // HA related configuration public static final String DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider"; @@ -184,6 +186,8 
@@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true; public static final String DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup"; public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup"; + public static final String DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled"; + public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false; public static final String DFS_ADMIN = "dfs.cluster.administrators"; public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource"; public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 9df725e592b..2ba0828f6c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -54,6 +54,8 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.VolumeId; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; @@ -1787,4 +1789,130 @@ public class DistributedFileSystem extends FileSystem { public RemoteIterator listCachePools() throws IOException { return dfs.listCachePools(); } + + /** + * {@inheritDoc} + */ + @Override + public void modifyAclEntries(Path path, final List aclSpec) + throws IOException { + Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.modifyAclEntries(getPathName(p), aclSpec); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) throws IOException { + fs.modifyAclEntries(p, aclSpec); + return null; + } + }.resolve(this, absF); + } + + /** + * {@inheritDoc} + */ + @Override + public void removeAclEntries(Path path, final List aclSpec) + throws IOException { + Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.removeAclEntries(getPathName(p), aclSpec); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) throws IOException { + fs.removeAclEntries(p, aclSpec); + return null; + } + }.resolve(this, absF); + } + + /** + * {@inheritDoc} + */ + @Override + public void removeDefaultAcl(Path path) throws IOException { + final Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.removeDefaultAcl(getPathName(p)); + return null; + } + @Override + public Void next(final FileSystem fs, final Path p) + throws IOException, UnresolvedLinkException { + fs.removeDefaultAcl(p); + return null; + } + }.resolve(this, absF); + } + + /** + * {@inheritDoc} + */ + @Override + public void removeAcl(Path path) throws IOException { + final Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void 
doCall(final Path p) throws IOException { + dfs.removeAcl(getPathName(p)); + return null; + } + @Override + public Void next(final FileSystem fs, final Path p) + throws IOException, UnresolvedLinkException { + fs.removeAcl(p); + return null; + } + }.resolve(this, absF); + } + + /** + * {@inheritDoc} + */ + @Override + public void setAcl(Path path, final List aclSpec) throws IOException { + Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.setAcl(getPathName(p), aclSpec); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) throws IOException { + fs.setAcl(p, aclSpec); + return null; + } + }.resolve(this, absF); + } + + /** + * {@inheritDoc} + */ + @Override + public AclStatus getAclStatus(Path path) throws IOException { + final Path absF = fixRelativePart(path); + return new FileSystemLinkResolver() { + @Override + public AclStatus doCall(final Path p) throws IOException { + return dfs.getAclStatus(getPathName(p)); + } + @Override + public AclStatus next(final FileSystem fs, final Path p) + throws IOException, UnresolvedLinkException { + return fs.getAclStatus(p); + } + }.resolve(this, absF); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java new file mode 100644 index 00000000000..12109999d8d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Indicates a failure manipulating an ACL. + */ +@InterfaceAudience.Private +public class AclException extends IOException { + private static final long serialVersionUID = 1L; + + /** + * Creates a new AclException. 
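A brief sketch of how these client entry points might be used together: parse an ACL spec string, replace the ACL, and handle the AclException that surfaces if the operation is rejected (for example when ACLs are disabled on the NameNode). The path and spec are illustrative:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.hdfs.protocol.AclException;

public class SetAclSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Full-replacement semantics: the spec must include user, group and other entries.
    List<AclEntry> aclSpec = AclEntry.parseAclSpec(
        "user::rwx,user:user1:rw-,group::r--,other::r--", true);
    try {
      fs.setAcl(new Path("/tmp/file1"), aclSpec);
    } catch (AclException e) {
      // Raised, for example, when dfs.namenode.acls.enabled is false on the NameNode.
      System.err.println("ACL operation rejected: " + e.getMessage());
    }
  }
}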
+ * + * @param message String message + */ + public AclException(String message) { + super(message); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 18751a2246a..106ea623c59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.protocol; import java.io.FileNotFoundException; import java.io.IOException; import java.util.EnumSet; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -34,6 +35,8 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; @@ -1184,4 +1187,49 @@ public interface ClientProtocol { @Idempotent public BatchedEntries listCachePools(String prevPool) throws IOException; + + /** + * Modifies ACL entries of files and directories. This method can add new ACL + * entries or modify the permissions on existing ACL entries. All existing + * ACL entries that are not specified in this call are retained without + * changes. (Modifications are merged into the current ACL.) + */ + @Idempotent + public void modifyAclEntries(String src, List aclSpec) + throws IOException; + + /** + * Removes ACL entries from files and directories. Other ACL entries are + * retained. + */ + @Idempotent + public void removeAclEntries(String src, List aclSpec) + throws IOException; + + /** + * Removes all default ACL entries from files and directories. + */ + @Idempotent + public void removeDefaultAcl(String src) throws IOException; + + /** + * Removes all but the base ACL entries of files and directories. The entries + * for user, group, and others are retained for compatibility with permission + * bits. + */ + @Idempotent + public void removeAcl(String src) throws IOException; + + /** + * Fully replaces ACL of files and directories, discarding all existing + * entries. + */ + @Idempotent + public void setAcl(String src, List aclSpec) throws IOException; + + /** + * Gets the ACLs of files and directories. 
+ */ + @Idempotent + public AclStatus getAclStatus(String src) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java index 9842b53fbd3..2739d2596ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java @@ -113,7 +113,11 @@ public class LayoutVersion { + " Use distinct StorageUuid per storage directory."), ADD_LAYOUT_FLAGS(-50, "Add support for layout flags."), CACHING(-51, "Support for cache pools and path-based caching"), - PROTOBUF_FORMAT(-52, "Use protobuf to serialize FSImage"); + // Hadoop 2.4.0 + PROTOBUF_FORMAT(-52, "Use protobuf to serialize FSImage"), + EXTENDED_ACL(-53, "Extended ACL"), + RESERVED_REL2_4_0(-54, -51, "Reserved for release 2.4.0", true, + PROTOBUF_FORMAT, EXTENDED_ACL); final int lv; final int ancestorLV; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index c8ab938044d..517991f75a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -37,6 +37,18 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; @@ -270,6 +282,24 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements VOID_SETBALANCERBANDWIDTH_RESPONSE = SetBalancerBandwidthResponseProto.newBuilder().build(); + private static final SetAclResponseProto + VOID_SETACL_RESPONSE = SetAclResponseProto.getDefaultInstance(); + + private static final 
ModifyAclEntriesResponseProto + VOID_MODIFYACLENTRIES_RESPONSE = ModifyAclEntriesResponseProto + .getDefaultInstance(); + + private static final RemoveAclEntriesResponseProto + VOID_REMOVEACLENTRIES_RESPONSE = RemoveAclEntriesResponseProto + .getDefaultInstance(); + + private static final RemoveDefaultAclResponseProto + VOID_REMOVEDEFAULTACL_RESPONSE = RemoveDefaultAclResponseProto + .getDefaultInstance(); + + private static final RemoveAclResponseProto + VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance(); + /** * Constructor * @@ -1143,4 +1173,73 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements throw new ServiceException(e); } } + + @Override + public ModifyAclEntriesResponseProto modifyAclEntries( + RpcController controller, ModifyAclEntriesRequestProto req) + throws ServiceException { + try { + server.modifyAclEntries(req.getSrc(), PBHelper.convertAclEntry(req.getAclSpecList())); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_MODIFYACLENTRIES_RESPONSE; + } + + @Override + public RemoveAclEntriesResponseProto removeAclEntries( + RpcController controller, RemoveAclEntriesRequestProto req) + throws ServiceException { + try { + server.removeAclEntries(req.getSrc(), + PBHelper.convertAclEntry(req.getAclSpecList())); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_REMOVEACLENTRIES_RESPONSE; + } + + @Override + public RemoveDefaultAclResponseProto removeDefaultAcl( + RpcController controller, RemoveDefaultAclRequestProto req) + throws ServiceException { + try { + server.removeDefaultAcl(req.getSrc()); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_REMOVEDEFAULTACL_RESPONSE; + } + + @Override + public RemoveAclResponseProto removeAcl(RpcController controller, + RemoveAclRequestProto req) throws ServiceException { + try { + server.removeAcl(req.getSrc()); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_REMOVEACL_RESPONSE; + } + + @Override + public SetAclResponseProto setAcl(RpcController controller, + SetAclRequestProto req) throws ServiceException { + try { + server.setAcl(req.getSrc(), PBHelper.convertAclEntry(req.getAclSpecList())); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_SETACL_RESPONSE; + } + + @Override + public GetAclStatusResponseProto getAclStatus(RpcController controller, + GetAclStatusRequestProto req) throws ServiceException { + try { + return PBHelper.convert(server.getAclStatus(req.getSrc())); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 9b303f22cdf..c037792ebb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -22,6 +22,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.Arrays; import java.util.EnumSet; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -34,6 +35,8 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options.Rename; 
import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; @@ -55,6 +58,12 @@ import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; @@ -1164,4 +1173,76 @@ public class ClientNamenodeProtocolTranslatorPB implements throw ProtobufHelper.getRemoteException(e); } } + + @Override + public void modifyAclEntries(String src, List aclSpec) + throws IOException { + ModifyAclEntriesRequestProto req = ModifyAclEntriesRequestProto + .newBuilder().setSrc(src) + .addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)).build(); + try { + rpcProxy.modifyAclEntries(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void removeAclEntries(String src, List aclSpec) + throws IOException { + RemoveAclEntriesRequestProto req = RemoveAclEntriesRequestProto + .newBuilder().setSrc(src) + .addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)).build(); + try { + rpcProxy.removeAclEntries(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void removeDefaultAcl(String src) throws IOException { + RemoveDefaultAclRequestProto req = RemoveDefaultAclRequestProto + .newBuilder().setSrc(src).build(); + try { + rpcProxy.removeDefaultAcl(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void removeAcl(String src) throws IOException { + RemoveAclRequestProto req = RemoveAclRequestProto.newBuilder() + .setSrc(src).build(); + try { + rpcProxy.removeAcl(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void setAcl(String src, List aclSpec) throws IOException { + SetAclRequestProto req = SetAclRequestProto.newBuilder() + .setSrc(src) + .addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)) + .build(); + try { + rpcProxy.setAcl(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public AclStatus getAclStatus(String src) throws IOException { + GetAclStatusRequestProto req = GetAclStatusRequestProto.newBuilder() + .setSrc(src).build(); + try { + return PBHelper.convert(rpcProxy.getAclStatus(null, 
req)); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index e9b7b1b47d3..b5dc0ee64ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -33,6 +33,11 @@ import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSUtil; @@ -61,6 +66,12 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto; @@ -190,6 +201,13 @@ public class PBHelper { RegisterCommandProto.newBuilder().build(); private static final RegisterCommand REG_CMD = new RegisterCommand(); + private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = + AclEntryScope.values(); + private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = + AclEntryType.values(); + private static final FsAction[] FSACTION_VALUES = + FsAction.values(); + private PBHelper() { /** Hidden constructor */ } @@ -198,6 +216,10 @@ public class PBHelper { return ByteString.copyFrom(bytes); } + private static , U extends Enum> U castEnum(T from, U[] to) { + return to[from.ordinal()]; + } + public static NamenodeRole convert(NamenodeRoleProto role) { switch (role) { case NAMENODE: @@ -750,8 +772,9 @@ public class PBHelper { return REG_CMD; case BlockIdCommand: return PBHelper.convert(proto.getBlkIdCmd()); + default: + return null; } - return null; } public static BalancerBandwidthCommandProto convert( @@ -1887,5 +1910,74 @@ public class PBHelper { assert size >= 0; return new ExactSizeInputStream(input, size); } + + private static AclEntryScopeProto convert(AclEntryScope v) { + return AclEntryScopeProto.valueOf(v.ordinal()); + } + + private static AclEntryScope convert(AclEntryScopeProto v) { + return castEnum(v, ACL_ENTRY_SCOPE_VALUES); + } + + private static AclEntryTypeProto convert(AclEntryType 
e) { + return AclEntryTypeProto.valueOf(e.ordinal()); + } + + private static AclEntryType convert(AclEntryTypeProto v) { + return castEnum(v, ACL_ENTRY_TYPE_VALUES); + } + + private static FsActionProto convert(FsAction v) { + return FsActionProto.valueOf(v != null ? v.ordinal() : 0); + } + + private static FsAction convert(FsActionProto v) { + return castEnum(v, FSACTION_VALUES); + } + + public static List convertAclEntryProto( + List aclSpec) { + ArrayList r = Lists.newArrayListWithCapacity(aclSpec.size()); + for (AclEntry e : aclSpec) { + AclEntryProto.Builder builder = AclEntryProto.newBuilder(); + builder.setType(convert(e.getType())); + builder.setScope(convert(e.getScope())); + builder.setPermissions(convert(e.getPermission())); + if (e.getName() != null) { + builder.setName(e.getName()); + } + r.add(builder.build()); + } + return r; + } + + public static List convertAclEntry(List aclSpec) { + ArrayList r = Lists.newArrayListWithCapacity(aclSpec.size()); + for (AclEntryProto e : aclSpec) { + AclEntry.Builder builder = new AclEntry.Builder(); + builder.setType(convert(e.getType())); + builder.setScope(convert(e.getScope())); + builder.setPermission(convert(e.getPermissions())); + if (e.hasName()) { + builder.setName(e.getName()); + } + r.add(builder.build()); + } + return r; + } + + public static AclStatus convert(GetAclStatusResponseProto e) { + AclStatusProto r = e.getResult(); + return new AclStatus.Builder().owner(r.getOwner()).group(r.getGroup()) + .stickyBit(r.getSticky()) + .addEntries(convertAclEntry(r.getEntriesList())).build(); + } + + public static GetAclStatusResponseProto convert(AclStatus e) { + AclStatusProto r = AclStatusProto.newBuilder().setOwner(e.getOwner()) + .setGroup(e.getGroup()).setSticky(e.isStickyBit()) + .addAllEntries(convertAclEntryProto(e.getEntries())).build(); + return GetAclStatusResponseProto.newBuilder().setResult(r).build(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java new file mode 100644 index 00000000000..bfc6b4d7a4c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.AclException; + +/** + * Support for ACLs is controlled by a configuration flag. If the configuration + * flag is false, then the NameNode will reject all ACL-related operations. 
+ */ +final class AclConfigFlag { + private final boolean enabled; + + /** + * Creates a new AclConfigFlag from configuration. + * + * @param conf Configuration to check + */ + public AclConfigFlag(Configuration conf) { + enabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, + DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT); + LogFactory.getLog(AclConfigFlag.class).info("ACLs enabled? " + enabled); + } + + /** + * Checks the flag on behalf of an ACL API call. + * + * @throws AclException if ACLs are disabled + */ + public void checkForApiCall() throws AclException { + if (!enabled) { + throw new AclException(String.format( + "The ACL operation has been rejected. " + + "Support for ACLs has been disabled by setting %s to false.", + DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY)); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java new file mode 100644 index 00000000000..1c5f469b3b0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.permission.AclEntry; + +import com.google.common.collect.ImmutableList; + +/** + * Feature that represents the ACLs of the inode. + */ +@InterfaceAudience.Private +public class AclFeature implements INode.Feature { + public static final ImmutableList EMPTY_ENTRY_LIST = + ImmutableList.of(); + + private final ImmutableList entries; + + public AclFeature(ImmutableList entries) { + this.entries = entries; + } + + public ImmutableList getEntries() { + return entries; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java new file mode 100644 index 00000000000..a79bb393f9f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java @@ -0,0 +1,406 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.Collections; +import java.util.List; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.AclException; +import org.apache.hadoop.hdfs.protocol.QuotaExceededException; + +/** + * AclStorage contains utility methods that define how ACL data is stored in the + * namespace. + * + * If an inode has an ACL, then the ACL bit is set in the inode's + * {@link FsPermission} and the inode also contains an {@link AclFeature}. For + * the access ACL, the owner and other entries are identical to the owner and + * other bits stored in FsPermission, so we reuse those. The access mask entry + * is stored into the group permission bits of FsPermission. This is consistent + * with other file systems' implementations of ACLs and eliminates the need for + * special handling in various parts of the codebase. For example, if a user + * calls chmod to change group permission bits on a file with an ACL, then the + * expected behavior is to change the ACL's mask entry. By saving the mask entry + * into the group permission bits, chmod continues to work correctly without + * special handling. All remaining access entries (named users and named groups) + * are stored as explicit {@link AclEntry} instances in a list inside the + * AclFeature. Additionally, all default entries are stored in the AclFeature. + * + * The methods in this class encapsulate these rules for reading or writing the + * ACL entries to the appropriate location. + * + * The methods in this class assume that input ACL entry lists have already been + * validated and sorted according to the rules enforced by + * {@link AclTransformation}. + */ +@InterfaceAudience.Private +final class AclStorage { + + /** + * If a default ACL is defined on a parent directory, then copies that default + * ACL to a newly created child file or directory. + * + * @param child INode newly created child + */ + public static void copyINodeDefaultAcl(INode child) { + INodeDirectory parent = child.getParent(); + AclFeature parentAclFeature = parent.getAclFeature(); + if (parentAclFeature == null || !(child.isFile() || child.isDirectory())) { + return; + } + + // Split parent's entries into access vs. default. + List featureEntries = parent.getAclFeature().getEntries(); + ScopedAclEntries scopedEntries = new ScopedAclEntries(featureEntries); + List parentDefaultEntries = scopedEntries.getDefaultEntries(); + + // The parent may have an access ACL but no default ACL. If so, exit. + if (parentDefaultEntries.isEmpty()) { + return; + } + + // Pre-allocate list size for access entries to copy from parent. 
+ List accessEntries = Lists.newArrayListWithCapacity( + parentDefaultEntries.size()); + + FsPermission childPerm = child.getFsPermission(); + + // Copy each default ACL entry from parent to new child's access ACL. + boolean parentDefaultIsMinimal = isMinimalAcl(parentDefaultEntries); + for (AclEntry entry: parentDefaultEntries) { + AclEntryType type = entry.getType(); + String name = entry.getName(); + AclEntry.Builder builder = new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(type) + .setName(name); + + // The child's initial permission bits are treated as the mode parameter, + // which can filter copied permission values for owner, mask and other. + final FsAction permission; + if (type == AclEntryType.USER && name == null) { + permission = entry.getPermission().and(childPerm.getUserAction()); + } else if (type == AclEntryType.GROUP && parentDefaultIsMinimal) { + // This only happens if the default ACL is a minimal ACL: exactly 3 + // entries corresponding to owner, group and other. In this case, + // filter the group permissions. + permission = entry.getPermission().and(childPerm.getGroupAction()); + } else if (type == AclEntryType.MASK) { + // Group bits from mode parameter filter permission of mask entry. + permission = entry.getPermission().and(childPerm.getGroupAction()); + } else if (type == AclEntryType.OTHER) { + permission = entry.getPermission().and(childPerm.getOtherAction()); + } else { + permission = entry.getPermission(); + } + + builder.setPermission(permission); + accessEntries.add(builder.build()); + } + + // A new directory also receives a copy of the parent's default ACL. + List defaultEntries = child.isDirectory() ? parentDefaultEntries : + Collections.emptyList(); + + final FsPermission newPerm; + if (!isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) { + // Save the new ACL to the child. + child.addAclFeature(createAclFeature(accessEntries, defaultEntries)); + newPerm = createFsPermissionForExtendedAcl(accessEntries, childPerm); + } else { + // The child is receiving a minimal ACL. + newPerm = createFsPermissionForMinimalAcl(accessEntries, childPerm); + } + + child.setPermission(newPerm); + } + + /** + * Reads the existing extended ACL entries of an inode. This method returns + * only the extended ACL entries stored in the AclFeature. If the inode does + * not have an ACL, then this method returns an empty list. This method + * supports querying by snapshot ID. + * + * @param inode INode to read + * @param snapshotId int ID of snapshot to read + * @return List containing extended inode ACL entries + */ + public static List readINodeAcl(INode inode, int snapshotId) { + AclFeature f = inode.getAclFeature(snapshotId); + return f == null ? ImmutableList. of() : f.getEntries(); + } + + /** + * Reads the existing ACL of an inode. This method always returns the full + * logical ACL of the inode after reading relevant data from the inode's + * {@link FsPermission} and {@link AclFeature}. Note that every inode + * logically has an ACL, even if no ACL has been set explicitly. If the inode + * does not have an extended ACL, then the result is a minimal ACL consising of + * exactly 3 entries that correspond to the owner, group and other permissions. + * This method always reads the inode's current state and does not support + * querying by snapshot ID. This is because the method is intended to support + * ACL modification APIs, which always apply a delta on top of current state. 
+ * + * @param inode INode to read + * @return List containing all logical inode ACL entries + */ + public static List readINodeLogicalAcl(INode inode) { + FsPermission perm = inode.getFsPermission(); + AclFeature f = inode.getAclFeature(); + if (f == null) { + return getMinimalAcl(perm); + } + + final List existingAcl; + // Split ACL entries stored in the feature into access vs. default. + List featureEntries = f.getEntries(); + ScopedAclEntries scoped = new ScopedAclEntries(featureEntries); + List accessEntries = scoped.getAccessEntries(); + List defaultEntries = scoped.getDefaultEntries(); + + // Pre-allocate list size for the explicit entries stored in the feature + // plus the 3 implicit entries (owner, group and other) from the permission + // bits. + existingAcl = Lists.newArrayListWithCapacity(featureEntries.size() + 3); + + if (!accessEntries.isEmpty()) { + // Add owner entry implied from user permission bits. + existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER).setPermission(perm.getUserAction()) + .build()); + + // Next add all named user and group entries taken from the feature. + existingAcl.addAll(accessEntries); + + // Add mask entry implied from group permission bits. + existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.MASK).setPermission(perm.getGroupAction()) + .build()); + + // Add other entry implied from other permission bits. + existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER).setPermission(perm.getOtherAction()) + .build()); + } else { + // It's possible that there is a default ACL but no access ACL. In this + // case, add the minimal access ACL implied by the permission bits. + existingAcl.addAll(getMinimalAcl(perm)); + } + + // Add all default entries after the access entries. + existingAcl.addAll(defaultEntries); + + // The above adds entries in the correct order, so no need to sort here. + return existingAcl; + } + + /** + * Completely removes the ACL from an inode. + * + * @param inode INode to update + * @param snapshotId int latest snapshot ID of inode + * @throws QuotaExceededException if quota limit is exceeded + */ + public static void removeINodeAcl(INode inode, int snapshotId) + throws QuotaExceededException { + AclFeature f = inode.getAclFeature(); + if (f == null) { + return; + } + + FsPermission perm = inode.getFsPermission(); + List featureEntries = f.getEntries(); + if (featureEntries.get(0).getScope() == AclEntryScope.ACCESS) { + // Restore group permissions from the feature's entry to permission + // bits, overwriting the mask, which is not part of a minimal ACL. + AclEntry groupEntryKey = new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS).setType(AclEntryType.GROUP).build(); + int groupEntryIndex = Collections.binarySearch(featureEntries, + groupEntryKey, AclTransformation.ACL_ENTRY_COMPARATOR); + assert groupEntryIndex >= 0; + FsAction groupPerm = featureEntries.get(groupEntryIndex).getPermission(); + FsPermission newPerm = new FsPermission(perm.getUserAction(), groupPerm, + perm.getOtherAction(), perm.getStickyBit()); + inode.setPermission(newPerm, snapshotId); + } + + inode.removeAclFeature(snapshotId); + } + + /** + * Updates an inode with a new ACL. This method takes a full logical ACL and + * stores the entries to the inode's {@link FsPermission} and + * {@link AclFeature}. 
+ * + * @param inode INode to update + * @param newAcl List containing new ACL entries + * @param snapshotId int latest snapshot ID of inode + * @throws AclException if the ACL is invalid for the given inode + * @throws QuotaExceededException if quota limit is exceeded + */ + public static void updateINodeAcl(INode inode, List newAcl, + int snapshotId) throws AclException, QuotaExceededException { + assert newAcl.size() >= 3; + FsPermission perm = inode.getFsPermission(); + final FsPermission newPerm; + if (!isMinimalAcl(newAcl)) { + // This is an extended ACL. Split entries into access vs. default. + ScopedAclEntries scoped = new ScopedAclEntries(newAcl); + List accessEntries = scoped.getAccessEntries(); + List defaultEntries = scoped.getDefaultEntries(); + + // Only directories may have a default ACL. + if (!defaultEntries.isEmpty() && !inode.isDirectory()) { + throw new AclException( + "Invalid ACL: only directories may have a default ACL."); + } + + // Attach entries to the feature. + if (inode.getAclFeature() != null) { + inode.removeAclFeature(snapshotId); + } + inode.addAclFeature(createAclFeature(accessEntries, defaultEntries), + snapshotId); + newPerm = createFsPermissionForExtendedAcl(accessEntries, perm); + } else { + // This is a minimal ACL. Remove the ACL feature if it previously had one. + if (inode.getAclFeature() != null) { + inode.removeAclFeature(snapshotId); + } + newPerm = createFsPermissionForMinimalAcl(newAcl, perm); + } + + inode.setPermission(newPerm, snapshotId); + } + + /** + * There is no reason to instantiate this class. + */ + private AclStorage() { + } + + /** + * Creates an AclFeature from the given ACL entries. + * + * @param accessEntries List access ACL entries + * @param defaultEntries List default ACL entries + * @return AclFeature containing the required ACL entries + */ + private static AclFeature createAclFeature(List accessEntries, + List defaultEntries) { + // Pre-allocate list size for the explicit entries stored in the feature, + // which is all entries minus the 3 entries implicitly stored in the + // permission bits. + List featureEntries = Lists.newArrayListWithCapacity( + (accessEntries.size() - 3) + defaultEntries.size()); + + // For the access ACL, the feature only needs to hold the named user and + // group entries. For a correctly sorted ACL, these will be in a + // predictable range. + if (!isMinimalAcl(accessEntries)) { + featureEntries.addAll( + accessEntries.subList(1, accessEntries.size() - 2)); + } + + // Add all default entries to the feature. + featureEntries.addAll(defaultEntries); + return new AclFeature(ImmutableList.copyOf(featureEntries)); + } + + /** + * Creates the new FsPermission for an inode that is receiving an extended + * ACL, based on its access ACL entries. For a correctly sorted ACL, the + * first entry is the owner and the last 2 entries are the mask and other + * entries respectively. Also preserve sticky bit and toggle ACL bit on. 
+ * + * @param accessEntries List access ACL entries + * @param existingPerm FsPermission existing permissions + * @return FsPermission new permissions + */ + private static FsPermission createFsPermissionForExtendedAcl( + List accessEntries, FsPermission existingPerm) { + return new FsPermission(accessEntries.get(0).getPermission(), + accessEntries.get(accessEntries.size() - 2).getPermission(), + accessEntries.get(accessEntries.size() - 1).getPermission(), + existingPerm.getStickyBit()); + } + + /** + * Creates the new FsPermission for an inode that is receiving a minimal ACL, + * based on its access ACL entries. For a correctly sorted ACL, the owner, + * group and other permissions are in order. Also preserve sticky bit and + * toggle ACL bit off. + * + * @param accessEntries List access ACL entries + * @param existingPerm FsPermission existing permissions + * @return FsPermission new permissions + */ + private static FsPermission createFsPermissionForMinimalAcl( + List accessEntries, FsPermission existingPerm) { + return new FsPermission(accessEntries.get(0).getPermission(), + accessEntries.get(1).getPermission(), + accessEntries.get(2).getPermission(), + existingPerm.getStickyBit()); + } + + /** + * Translates the given permission bits to the equivalent minimal ACL. + * + * @param perm FsPermission to translate + * @return List containing exactly 3 entries representing the owner, + * group and other permissions + */ + private static List getMinimalAcl(FsPermission perm) { + return Lists.newArrayList( + new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .setPermission(perm.getUserAction()) + .build(), + new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.GROUP) + .setPermission(perm.getGroupAction()) + .build(), + new AclEntry.Builder() + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER) + .setPermission(perm.getOtherAction()) + .build()); + } + + /** + * Checks if the given entries represent a minimal ACL (contains exactly 3 + * entries). + * + * @param entries List entries to check + * @return boolean true if the entries represent a minimal ACL + */ + private static boolean isMinimalAcl(List entries) { + return entries.size() == 3; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java new file mode 100644 index 00000000000..44a2f3dd118 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java @@ -0,0 +1,485 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.EnumMap; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; + +import com.google.common.base.Objects; +import com.google.common.collect.ComparisonChain; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Ordering; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.AclException; + +/** + * AclTransformation defines the operations that can modify an ACL. All ACL + * modifications take as input an existing ACL and apply logic to add new + * entries, modify existing entries or remove old entries. Some operations also + * accept an ACL spec: a list of entries that further describes the requested + * change. Different operations interpret the ACL spec differently. In the + * case of adding an ACL to an inode that previously did not have one, the + * existing ACL can be a "minimal ACL" containing exactly 3 entries for owner, + * group and other, all derived from the {@link FsPermission} bits. + * + * The algorithms implemented here require sorted lists of ACL entries. For any + * existing ACL, it is assumed that the entries are sorted. This is because all + * ACL creation and modification is intended to go through these methods, and + * they all guarantee correct sort order in their outputs. However, an ACL spec + * is considered untrusted user input, so all operations pre-sort the ACL spec as + * the first step. + */ +@InterfaceAudience.Private +final class AclTransformation { + private static final int MAX_ENTRIES = 32; + + /** + * Filters (discards) any existing ACL entries that have the same scope, type + * and name of any entry in the ACL spec. If necessary, recalculates the mask + * entries. If necessary, default entries may be inferred by copying the + * permissions of the corresponding access entries. It is invalid to request + * removal of the mask entry from an ACL that would otherwise require a mask + * entry, due to existing named entries or an unnamed group entry. 
+ * + * @param existingAcl List existing ACL + * @param inAclSpec List ACL spec describing entries to filter + * @return List new ACL + * @throws AclException if validation fails + */ + public static List filterAclEntriesByAclSpec( + List existingAcl, List inAclSpec) throws AclException { + ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); + ArrayList aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); + EnumMap providedMask = + Maps.newEnumMap(AclEntryScope.class); + EnumSet maskDirty = EnumSet.noneOf(AclEntryScope.class); + EnumSet scopeDirty = EnumSet.noneOf(AclEntryScope.class); + for (AclEntry existingEntry: existingAcl) { + if (aclSpec.containsKey(existingEntry)) { + scopeDirty.add(existingEntry.getScope()); + if (existingEntry.getType() == MASK) { + maskDirty.add(existingEntry.getScope()); + } + } else { + if (existingEntry.getType() == MASK) { + providedMask.put(existingEntry.getScope(), existingEntry); + } else { + aclBuilder.add(existingEntry); + } + } + } + copyDefaultsIfNeeded(aclBuilder); + calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); + return buildAndValidateAcl(aclBuilder); + } + + /** + * Filters (discards) any existing default ACL entries. The new ACL retains + * only the access ACL entries. + * + * @param existingAcl List existing ACL + * @return List new ACL + * @throws AclException if validation fails + */ + public static List filterDefaultAclEntries( + List existingAcl) throws AclException { + ArrayList aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); + for (AclEntry existingEntry: existingAcl) { + if (existingEntry.getScope() == DEFAULT) { + // Default entries sort after access entries, so we can exit early. + break; + } + aclBuilder.add(existingEntry); + } + return buildAndValidateAcl(aclBuilder); + } + + /** + * Merges the entries of the ACL spec into the existing ACL. If necessary, + * recalculates the mask entries. If necessary, default entries may be + * inferred by copying the permissions of the corresponding access entries. + * + * @param existingAcl List existing ACL + * @param inAclSpec List ACL spec containing entries to merge + * @return List new ACL + * @throws AclException if validation fails + */ + public static List mergeAclEntries(List existingAcl, + List inAclSpec) throws AclException { + ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); + ArrayList aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); + List foundAclSpecEntries = + Lists.newArrayListWithCapacity(MAX_ENTRIES); + EnumMap providedMask = + Maps.newEnumMap(AclEntryScope.class); + EnumSet maskDirty = EnumSet.noneOf(AclEntryScope.class); + EnumSet scopeDirty = EnumSet.noneOf(AclEntryScope.class); + for (AclEntry existingEntry: existingAcl) { + AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry); + if (aclSpecEntry != null) { + foundAclSpecEntries.add(aclSpecEntry); + scopeDirty.add(aclSpecEntry.getScope()); + if (aclSpecEntry.getType() == MASK) { + providedMask.put(aclSpecEntry.getScope(), aclSpecEntry); + maskDirty.add(aclSpecEntry.getScope()); + } else { + aclBuilder.add(aclSpecEntry); + } + } else { + if (existingEntry.getType() == MASK) { + providedMask.put(existingEntry.getScope(), existingEntry); + } else { + aclBuilder.add(existingEntry); + } + } + } + // ACL spec entries that were not replacements are new additions. 
+ for (AclEntry newEntry: aclSpec) { + if (Collections.binarySearch(foundAclSpecEntries, newEntry, + ACL_ENTRY_COMPARATOR) < 0) { + scopeDirty.add(newEntry.getScope()); + if (newEntry.getType() == MASK) { + providedMask.put(newEntry.getScope(), newEntry); + maskDirty.add(newEntry.getScope()); + } else { + aclBuilder.add(newEntry); + } + } + } + copyDefaultsIfNeeded(aclBuilder); + calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); + return buildAndValidateAcl(aclBuilder); + } + + /** + * Completely replaces the ACL with the entries of the ACL spec. If + * necessary, recalculates the mask entries. If necessary, default entries + * are inferred by copying the permissions of the corresponding access + * entries. Replacement occurs separately for each of the access ACL and the + * default ACL. If the ACL spec contains only access entries, then the + * existing default entries are retained. If the ACL spec contains only + * default entries, then the existing access entries are retained. If the ACL + * spec contains both access and default entries, then both are replaced. + * + * @param existingAcl List existing ACL + * @param inAclSpec List ACL spec containing replacement entries + * @return List new ACL + * @throws AclException if validation fails + */ + public static List replaceAclEntries(List existingAcl, + List inAclSpec) throws AclException { + ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); + ArrayList aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); + // Replacement is done separately for each scope: access and default. + EnumMap providedMask = + Maps.newEnumMap(AclEntryScope.class); + EnumSet maskDirty = EnumSet.noneOf(AclEntryScope.class); + EnumSet scopeDirty = EnumSet.noneOf(AclEntryScope.class); + for (AclEntry aclSpecEntry: aclSpec) { + scopeDirty.add(aclSpecEntry.getScope()); + if (aclSpecEntry.getType() == MASK) { + providedMask.put(aclSpecEntry.getScope(), aclSpecEntry); + maskDirty.add(aclSpecEntry.getScope()); + } else { + aclBuilder.add(aclSpecEntry); + } + } + // Copy existing entries if the scope was not replaced. + for (AclEntry existingEntry: existingAcl) { + if (!scopeDirty.contains(existingEntry.getScope())) { + if (existingEntry.getType() == MASK) { + providedMask.put(existingEntry.getScope(), existingEntry); + } else { + aclBuilder.add(existingEntry); + } + } + } + copyDefaultsIfNeeded(aclBuilder); + calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); + return buildAndValidateAcl(aclBuilder); + } + + /** + * There is no reason to instantiate this class. + */ + private AclTransformation() { + } + + /** + * Comparator that enforces required ordering for entries within an ACL: + * -owner entry (unnamed user) + * -all named user entries (internal ordering undefined) + * -owning group entry (unnamed group) + * -all named group entries (internal ordering undefined) + * -mask entry + * -other entry + * All access ACL entries sort ahead of all default ACL entries. 
+ */ + static final Comparator ACL_ENTRY_COMPARATOR = + new Comparator() { + @Override + public int compare(AclEntry entry1, AclEntry entry2) { + return ComparisonChain.start() + .compare(entry1.getScope(), entry2.getScope(), + Ordering.explicit(ACCESS, DEFAULT)) + .compare(entry1.getType(), entry2.getType(), + Ordering.explicit(USER, GROUP, MASK, OTHER)) + .compare(entry1.getName(), entry2.getName(), + Ordering.natural().nullsFirst()) + .result(); + } + }; + + /** + * Builds the final list of ACL entries to return by trimming, sorting and + * validating the ACL entries that have been added. + * + * @param aclBuilder ArrayList containing entries to build + * @return List unmodifiable, sorted list of ACL entries + * @throws AclException if validation fails + */ + private static List buildAndValidateAcl( + ArrayList aclBuilder) throws AclException { + if (aclBuilder.size() > MAX_ENTRIES) { + throw new AclException("Invalid ACL: ACL has " + aclBuilder.size() + + " entries, which exceeds maximum of " + MAX_ENTRIES + "."); + } + aclBuilder.trimToSize(); + Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR); + // Full iteration to check for duplicates and invalid named entries. + AclEntry prevEntry = null; + for (AclEntry entry: aclBuilder) { + if (prevEntry != null && + ACL_ENTRY_COMPARATOR.compare(prevEntry, entry) == 0) { + throw new AclException( + "Invalid ACL: multiple entries with same scope, type and name."); + } + if (entry.getName() != null && (entry.getType() == MASK || + entry.getType() == OTHER)) { + throw new AclException( + "Invalid ACL: this entry type must not have a name: " + entry + "."); + } + prevEntry = entry; + } + // Search for the required base access entries. If there is a default ACL, + // then do the same check on the default entries. + ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder); + for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) { + AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS) + .setType(type).build(); + if (Collections.binarySearch(scopedEntries.getAccessEntries(), + accessEntryKey, ACL_ENTRY_COMPARATOR) < 0) { + throw new AclException( + "Invalid ACL: the user, group and other entries are required."); + } + if (!scopedEntries.getDefaultEntries().isEmpty()) { + AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT) + .setType(type).build(); + if (Collections.binarySearch(scopedEntries.getDefaultEntries(), + defaultEntryKey, ACL_ENTRY_COMPARATOR) < 0) { + throw new AclException( + "Invalid default ACL: the user, group and other entries are required."); + } + } + } + return Collections.unmodifiableList(aclBuilder); + } + + /** + * Calculates mask entries required for the ACL. Mask calculation is performed + * separately for each scope: access and default. This method is responsible + * for handling the following cases of mask calculation: + * 1. Throws an exception if the caller attempts to remove the mask entry of an + * existing ACL that requires it. If the ACL has any named entries, then a + * mask entry is required. + * 2. If the caller supplied a mask in the ACL spec, use it. + * 3. If the caller did not supply a mask, but there are ACL entry changes in + * this scope, then automatically calculate a new mask. The permissions of + * the new mask are the union of the permissions on the group entry and all + * named entries. 
+ * + * @param aclBuilder ArrayList containing entries to build + * @param providedMask EnumMap mapping each scope to + * the mask entry that was provided for that scope (if provided) + * @param maskDirty EnumSet which contains a scope if the mask + * entry is dirty (added or deleted) in that scope + * @param scopeDirty EnumSet which contains a scope if any entry + * is dirty (added or deleted) in that scope + * @throws AclException if validation fails + */ + private static void calculateMasks(List aclBuilder, + EnumMap providedMask, + EnumSet maskDirty, EnumSet scopeDirty) + throws AclException { + EnumSet scopeFound = EnumSet.noneOf(AclEntryScope.class); + EnumMap unionPerms = + Maps.newEnumMap(AclEntryScope.class); + EnumSet maskNeeded = EnumSet.noneOf(AclEntryScope.class); + // Determine which scopes are present, which scopes need a mask, and the + // union of group class permissions in each scope. + for (AclEntry entry: aclBuilder) { + scopeFound.add(entry.getScope()); + if (entry.getType() == GROUP || entry.getName() != null) { + FsAction scopeUnionPerms = Objects.firstNonNull( + unionPerms.get(entry.getScope()), FsAction.NONE); + unionPerms.put(entry.getScope(), + scopeUnionPerms.or(entry.getPermission())); + } + if (entry.getName() != null) { + maskNeeded.add(entry.getScope()); + } + } + // Add mask entry if needed in each scope. + for (AclEntryScope scope: scopeFound) { + if (!providedMask.containsKey(scope) && maskNeeded.contains(scope) && + maskDirty.contains(scope)) { + // Caller explicitly removed mask entry, but it's required. + throw new AclException( + "Invalid ACL: mask is required, but it was deleted."); + } else if (providedMask.containsKey(scope) && + (!scopeDirty.contains(scope) || maskDirty.contains(scope))) { + // Caller explicitly provided new mask, or we are preserving the existing + // mask in an unchanged scope. + aclBuilder.add(providedMask.get(scope)); + } else if (maskNeeded.contains(scope) || providedMask.containsKey(scope)) { + // Otherwise, if there are maskable entries present, or the ACL + // previously had a mask, then recalculate a mask automatically. + aclBuilder.add(new AclEntry.Builder() + .setScope(scope) + .setType(MASK) + .setPermission(unionPerms.get(scope)) + .build()); + } + } + } + + /** + * Adds unspecified default entries by copying permissions from the + * corresponding access entries. 
+ * + * @param aclBuilder ArrayList containing entries to build + */ + private static void copyDefaultsIfNeeded(List aclBuilder) { + Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR); + ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder); + if (!scopedEntries.getDefaultEntries().isEmpty()) { + List accessEntries = scopedEntries.getAccessEntries(); + List defaultEntries = scopedEntries.getDefaultEntries(); + List copiedEntries = Lists.newArrayListWithCapacity(3); + for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) { + AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT) + .setType(type).build(); + int defaultEntryIndex = Collections.binarySearch(defaultEntries, + defaultEntryKey, ACL_ENTRY_COMPARATOR); + if (defaultEntryIndex < 0) { + AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS) + .setType(type).build(); + int accessEntryIndex = Collections.binarySearch(accessEntries, + accessEntryKey, ACL_ENTRY_COMPARATOR); + if (accessEntryIndex >= 0) { + copiedEntries.add(new AclEntry.Builder() + .setScope(DEFAULT) + .setType(type) + .setPermission(accessEntries.get(accessEntryIndex).getPermission()) + .build()); + } + } + } + // Add all copied entries when done to prevent potential issues with binary + // search on a modified aclBulider during the main loop. + aclBuilder.addAll(copiedEntries); + } + } + + /** + * An ACL spec that has been pre-validated and sorted. + */ + private static final class ValidatedAclSpec implements Iterable { + private final List aclSpec; + + /** + * Creates a ValidatedAclSpec by pre-validating and sorting the given ACL + * entries. Pre-validation checks that it does not exceed the maximum + * entries. This check is performed before modifying the ACL, and it's + * actually insufficient for enforcing the maximum number of entries. + * Transformation logic can create additional entries automatically,such as + * the mask and some of the default entries, so we also need additional + * checks during transformation. The up-front check is still valuable here + * so that we don't run a lot of expensive transformation logic while + * holding the namesystem lock for an attacker who intentionally sent a huge + * ACL spec. + * + * @param aclSpec List containing unvalidated input ACL spec + * @throws AclException if validation fails + */ + public ValidatedAclSpec(List aclSpec) throws AclException { + if (aclSpec.size() > MAX_ENTRIES) { + throw new AclException("Invalid ACL: ACL spec has " + aclSpec.size() + + " entries, which exceeds maximum of " + MAX_ENTRIES + "."); + } + Collections.sort(aclSpec, ACL_ENTRY_COMPARATOR); + this.aclSpec = aclSpec; + } + + /** + * Returns true if this contains an entry matching the given key. An ACL + * entry's key consists of scope, type and name (but not permission). + * + * @param key AclEntry search key + * @return boolean true if found + */ + public boolean containsKey(AclEntry key) { + return Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR) >= 0; + } + + /** + * Returns the entry matching the given key or null if not found. An ACL + * entry's key consists of scope, type and name (but not permission). 
+ * + * @param key AclEntry search key + * @return AclEntry entry matching the given key or null if not found + */ + public AclEntry findByKey(AclEntry key) { + int index = Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR); + if (index >= 0) { + return aclSpec.get(index); + } + return null; + } + + @Override + public Iterator iterator() { + return aclSpec.iterator(); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index ffba2304478..ea23e6a25c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -39,12 +39,15 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -283,7 +286,7 @@ public class FSDirectory implements Closeable { short replication, long preferredBlockSize, String clientName, String clientMachine, DatanodeDescriptor clientNode) throws FileAlreadyExistsException, QuotaExceededException, - UnresolvedLinkException, SnapshotAccessControlException { + UnresolvedLinkException, SnapshotAccessControlException, AclException { waitForReady(); // Always do an implicit mkdirs for parent directory tree. 
@@ -325,6 +328,7 @@ public class FSDirectory implements Closeable { INodeFile unprotectedAddFile( long id, String path, PermissionStatus permissions, + List aclEntries, short replication, long modificationTime, long atime, @@ -347,6 +351,10 @@ public class FSDirectory implements Closeable { try { if (addINode(path, newNode)) { + if (aclEntries != null) { + AclStorage.updateINodeAcl(newNode, aclEntries, + Snapshot.CURRENT_STATE_ID); + } return newNode; } } catch (IOException e) { @@ -1168,7 +1176,8 @@ public class FSDirectory implements Closeable { if (inode == null) { throw new FileNotFoundException("File does not exist: " + src); } - inode.setPermission(permissions, inodesInPath.getLatestSnapshotId()); + int snapshotId = inodesInPath.getLatestSnapshotId(); + inode.setPermission(permissions, snapshotId); } void setOwner(String src, String username, String groupname) @@ -1615,6 +1624,14 @@ public class FSDirectory implements Closeable { */ private HdfsFileStatus getFileInfo4DotSnapshot(String src) throws UnresolvedLinkException { + if (getINode4DotSnapshot(src) != null) { + return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, + HdfsFileStatus.EMPTY_NAME, -1L, 0); + } + return null; + } + + private INode getINode4DotSnapshot(String src) throws UnresolvedLinkException { Preconditions.checkArgument( src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR); @@ -1626,8 +1643,7 @@ public class FSDirectory implements Closeable { if (node != null && node.isDirectory() && node.asDirectory() instanceof INodeDirectorySnapshottable) { - return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, - HdfsFileStatus.EMPTY_NAME, -1L, 0); + return node; } return null; } @@ -1906,7 +1922,8 @@ public class FSDirectory implements Closeable { boolean mkdirs(String src, PermissionStatus permissions, boolean inheritPermission, long now) throws FileAlreadyExistsException, QuotaExceededException, - UnresolvedLinkException, SnapshotAccessControlException { + UnresolvedLinkException, SnapshotAccessControlException, + AclException { src = normalizePath(src); String[] names = INode.getPathNames(src); byte[][] components = INode.getPathComponents(names); @@ -1969,7 +1986,7 @@ public class FSDirectory implements Closeable { pathbuilder.append(Path.SEPARATOR + names[i]); unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i, components[i], (i < lastInodeIndex) ? parentPermissions - : permissions, now); + : permissions, null, now); if (inodes[i] == null) { return false; } @@ -1992,14 +2009,14 @@ public class FSDirectory implements Closeable { } INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions, - long timestamp) throws QuotaExceededException, - UnresolvedLinkException { + List aclEntries, long timestamp) + throws QuotaExceededException, UnresolvedLinkException, AclException { assert hasWriteLock(); byte[][] components = INode.getPathComponents(src); INodesInPath iip = getExistingPathINodes(components); INode[] inodes = iip.getINodes(); final int pos = inodes.length - 1; - unprotectedMkdir(inodeId, iip, pos, components[pos], permissions, + unprotectedMkdir(inodeId, iip, pos, components[pos], permissions, aclEntries, timestamp); return inodes[pos]; } @@ -2009,12 +2026,16 @@ public class FSDirectory implements Closeable { * All ancestors exist. Newly created one stored at index pos. 
*/ private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath, - int pos, byte[] name, PermissionStatus permission, long timestamp) - throws QuotaExceededException { + int pos, byte[] name, PermissionStatus permission, + List aclEntries, long timestamp) + throws QuotaExceededException, AclException { assert hasWriteLock(); final INodeDirectory dir = new INodeDirectory(inodeId, name, permission, timestamp); if (addChild(inodesInPath, pos, dir, true)) { + if (aclEntries != null) { + AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID); + } inodesInPath.setINode(pos, dir); } } @@ -2239,6 +2260,7 @@ public class FSDirectory implements Closeable { -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); } else { iip.setINode(pos - 1, child.getParent()); + AclStorage.copyINodeDefaultAcl(child); addToInodeMap(child); } return added; @@ -2625,7 +2647,7 @@ public class FSDirectory implements Closeable { INodeSymlink addSymlink(String path, String target, PermissionStatus dirPerms, boolean createParent, boolean logRetryCache) throws UnresolvedLinkException, FileAlreadyExistsException, - QuotaExceededException, SnapshotAccessControlException { + QuotaExceededException, SnapshotAccessControlException, AclException { waitForReady(); final long modTime = now(); @@ -2669,7 +2691,154 @@ public class FSDirectory implements Closeable { target); return addINode(path, symlink) ? symlink : null; } - + + void modifyAclEntries(String src, List aclSpec) throws IOException { + writeLock(); + try { + List newAcl = unprotectedModifyAclEntries(src, aclSpec); + fsImage.getEditLog().logSetAcl(src, newAcl); + } finally { + writeUnlock(); + } + } + + private List unprotectedModifyAclEntries(String src, + List aclSpec) throws IOException { + assert hasWriteLock(); + INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + List existingAcl = AclStorage.readINodeLogicalAcl(inode); + List newAcl = AclTransformation.mergeAclEntries(existingAcl, + aclSpec); + AclStorage.updateINodeAcl(inode, newAcl, snapshotId); + return newAcl; + } + + void removeAclEntries(String src, List aclSpec) throws IOException { + writeLock(); + try { + List newAcl = unprotectedRemoveAclEntries(src, aclSpec); + fsImage.getEditLog().logSetAcl(src, newAcl); + } finally { + writeUnlock(); + } + } + + private List unprotectedRemoveAclEntries(String src, + List aclSpec) throws IOException { + assert hasWriteLock(); + INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + List existingAcl = AclStorage.readINodeLogicalAcl(inode); + List newAcl = AclTransformation.filterAclEntriesByAclSpec( + existingAcl, aclSpec); + AclStorage.updateINodeAcl(inode, newAcl, snapshotId); + return newAcl; + } + + void removeDefaultAcl(String src) throws IOException { + writeLock(); + try { + List newAcl = unprotectedRemoveDefaultAcl(src); + fsImage.getEditLog().logSetAcl(src, newAcl); + } finally { + writeUnlock(); + } + } + + private List unprotectedRemoveDefaultAcl(String src) + throws IOException { + assert hasWriteLock(); + INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + List existingAcl = AclStorage.readINodeLogicalAcl(inode); + List newAcl = AclTransformation.filterDefaultAclEntries( + existingAcl); + 
AclStorage.updateINodeAcl(inode, newAcl, snapshotId); + return newAcl; + } + + void removeAcl(String src) throws IOException { + writeLock(); + try { + unprotectedRemoveAcl(src); + fsImage.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST); + } finally { + writeUnlock(); + } + } + + private void unprotectedRemoveAcl(String src) throws IOException { + assert hasWriteLock(); + INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + AclStorage.removeINodeAcl(inode, snapshotId); + } + + void setAcl(String src, List aclSpec) throws IOException { + writeLock(); + try { + List newAcl = unprotectedSetAcl(src, aclSpec); + fsImage.getEditLog().logSetAcl(src, newAcl); + } finally { + writeUnlock(); + } + } + + List unprotectedSetAcl(String src, List aclSpec) + throws IOException { + // ACL removal is logged to edits as OP_SET_ACL with an empty list. + if (aclSpec.isEmpty()) { + unprotectedRemoveAcl(src); + return AclFeature.EMPTY_ENTRY_LIST; + } + + assert hasWriteLock(); + INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + List existingAcl = AclStorage.readINodeLogicalAcl(inode); + List newAcl = AclTransformation.replaceAclEntries(existingAcl, + aclSpec); + AclStorage.updateINodeAcl(inode, newAcl, snapshotId); + return newAcl; + } + + AclStatus getAclStatus(String src) throws IOException { + String srcs = normalizePath(src); + readLock(); + try { + // There is no real inode for the path ending in ".snapshot", so return a + // non-null, unpopulated AclStatus. This is similar to getFileInfo. + if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR) && + getINode4DotSnapshot(srcs) != null) { + return new AclStatus.Builder().owner("").group("").build(); + } + INodesInPath iip = rootDir.getLastINodeInPath(srcs, true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getPathSnapshotId(); + List acl = AclStorage.readINodeAcl(inode, snapshotId); + return new AclStatus.Builder() + .owner(inode.getUserName()).group(inode.getGroupName()) + .stickyBit(inode.getFsPermission(snapshotId).getStickyBit()) + .addEntries(acl).build(); + } finally { + readUnlock(); + } + } + + private static INode resolveLastINode(String src, INodesInPath iip) + throws FileNotFoundException { + INode inode = iip.getLastINode(); + if (inode == null) + throw new FileNotFoundException("cannot find " + src); + return inode; + } + /** * Caches frequently used file names to reuse file name objects and * reduce heap size. 
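Editorial note, not part of the patch: a hedged client-side usage sketch showing how the FSDirectory ACL methods above are reached. It assumes the modifyAclEntries, getAclStatus and removeAcl methods added to FileSystem by this change; the path and the named-user entry are hypothetical.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

import com.google.common.collect.Lists;

public class AclClientSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/acl-demo");   // hypothetical path

    // Merge one named-user entry into the existing ACL. On the NameNode this
    // runs AclTransformation.mergeAclEntries and recalculates the mask.
    List<AclEntry> aclSpec = Lists.newArrayList(
        new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("bruce")
            .setPermission(FsAction.READ_WRITE)
            .build());
    fs.modifyAclEntries(path, aclSpec);

    // Read back the ACL entries stored for the inode, assembled by AclStorage.
    AclStatus status = fs.getAclStatus(path);
    for (AclEntry entry : status.getEntries()) {
      System.out.println(entry);
    }

    // ACL removal is recorded in the edit log as OP_SET_ACL with an empty list
    // (see unprotectedSetAcl above).
    fs.removeAcl(path);
  }
}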
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 8965295cd63..36c3ffbaf06 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -33,7 +33,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -70,6 +72,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; @@ -681,6 +684,7 @@ public class FSEditLog implements LogsPurgeable { */ public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) { Preconditions.checkArgument(newNode.isUnderConstruction()); + PermissionStatus permissions = newNode.getPermissionStatus(); AddOp op = AddOp.getInstance(cache.get()) .setInodeId(newNode.getId()) .setPath(path) @@ -689,9 +693,14 @@ public class FSEditLog implements LogsPurgeable { .setAccessTime(newNode.getAccessTime()) .setBlockSize(newNode.getPreferredBlockSize()) .setBlocks(newNode.getBlocks()) - .setPermissionStatus(newNode.getPermissionStatus()) + .setPermissionStatus(permissions) .setClientName(newNode.getFileUnderConstructionFeature().getClientName()) .setClientMachine(newNode.getFileUnderConstructionFeature().getClientMachine()); + + AclFeature f = newNode.getAclFeature(); + if (f != null) { + op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode)); + } logRpcIds(op, toLogRpcIds); logEdit(op); } @@ -736,11 +745,17 @@ public class FSEditLog implements LogsPurgeable { * Add create directory record to edit log */ public void logMkDir(String path, INode newNode) { + PermissionStatus permissions = newNode.getPermissionStatus(); MkdirOp op = MkdirOp.getInstance(cache.get()) .setInodeId(newNode.getId()) .setPath(path) .setTimestamp(newNode.getModificationTime()) - .setPermissionStatus(newNode.getPermissionStatus()); + .setPermissionStatus(permissions); + + AclFeature f = newNode.getAclFeature(); + if (f != null) { + op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode)); + } logEdit(op); } @@ -1014,6 +1029,13 @@ public class FSEditLog implements LogsPurgeable { logEdit(op); } + void logSetAcl(String src, List entries) { + SetAclOp op = SetAclOp.getInstance(); + op.src = src; + op.aclEntries = entries; + logEdit(op); + } + /** * Get all the journals this edit log is currently operating on. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 33a2dabc62b..2e0da00e8a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp; @@ -323,9 +324,10 @@ public class FSEditLogLoader { inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, lastInodeId); newFile = fsDir.unprotectedAddFile(inodeId, - path, addCloseOp.permissions, replication, - addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true, - addCloseOp.clientName, addCloseOp.clientMachine); + path, addCloseOp.permissions, addCloseOp.aclEntries, + replication, addCloseOp.mtime, addCloseOp.atime, + addCloseOp.blockSize, true, addCloseOp.clientName, + addCloseOp.clientMachine); fsNamesys.leaseManager.addLease(addCloseOp.clientName, path); // add the op into retry cache if necessary @@ -485,7 +487,7 @@ public class FSEditLogLoader { lastInodeId); fsDir.unprotectedMkdir(inodeId, renameReservedPathsOnUpgrade(mkdirOp.path, logVersion), - mkdirOp.permissions, mkdirOp.timestamp); + mkdirOp.permissions, mkdirOp.aclEntries, mkdirOp.timestamp); break; } case OP_SET_GENSTAMP_V1: { @@ -744,6 +746,11 @@ public class FSEditLogLoader { } break; } + case OP_SET_ACL: { + SetAclOp setAclOp = (SetAclOp) op; + fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries); + break; + } default: throw new IOException("Invalid operation read " + op.opCode); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 1f17841c2fe..6ea05af4d6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -44,6 +44,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_ACL; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V1; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V2; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_NS_QUOTA; @@ -76,6 +77,10 @@ import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -87,6 +92,8 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; @@ -109,6 +116,8 @@ import org.xml.sax.helpers.AttributesImpl; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; /** * Helper classes for reading the ops from an InputStream. @@ -170,6 +179,7 @@ public abstract class FSEditLogOp { inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp()); inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp()); inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp()); + inst.put(OP_SET_ACL, new SetAclOp()); } public FSEditLogOp get(FSEditLogOpCodes opcode) { @@ -177,6 +187,16 @@ public abstract class FSEditLogOp { } } + private static ImmutableMap fsActionMap() { + ImmutableMap.Builder b = ImmutableMap.builder(); + for (FsAction v : FsAction.values()) + b.put(v.SYMBOL, v); + return b.build(); + } + + private static final ImmutableMap FSACTION_SYMBOL_MAP + = fsActionMap(); + /** * Constructor for an EditLog Op. EditLog ops cannot be constructed * directly, but only through Reader#readOp. 
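FSACTION_SYMBOL_MAP indexes FsAction constants by their symbolic form so that fsActionFromXml can parse the "rwx"/"r--" strings written by the offline edits viewer. A standalone sketch of the same lookup, assuming only the public FsAction enum and its SYMBOL field:

import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.fs.permission.FsAction;

final class FsActionSymbolLookup {
  // Same construction as fsActionMap() above: symbol string -> enum constant.
  private static final ImmutableMap<String, FsAction> BY_SYMBOL;
  static {
    ImmutableMap.Builder<String, FsAction> b = ImmutableMap.builder();
    for (FsAction v : FsAction.values()) {
      b.put(v.SYMBOL, v);   // e.g. "r-x" -> FsAction.READ_EXECUTE
    }
    BY_SYMBOL = b.build();
  }

  static FsAction parse(String symbol) {
    FsAction v = BY_SYMBOL.get(symbol);
    if (v == null) {
      throw new IllegalArgumentException("Unknown FsAction symbol: " + symbol);
    }
    return v;
  }
}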
@@ -278,7 +298,76 @@ public abstract class FSEditLogOp { XMLUtils.addSaxString(contentHandler, "RPC_CALLID", Integer.valueOf(callId).toString()); } - + + private static final class AclEditLogUtil { + private static final int ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET = 6; + private static final int ACL_EDITLOG_ENTRY_TYPE_OFFSET = 3; + private static final int ACL_EDITLOG_ENTRY_SCOPE_OFFSET = 5; + private static final int ACL_EDITLOG_PERM_MASK = 7; + private static final int ACL_EDITLOG_ENTRY_TYPE_MASK = 3; + private static final int ACL_EDITLOG_ENTRY_SCOPE_MASK = 1; + + private static final FsAction[] FSACTION_VALUES = FsAction.values(); + private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope + .values(); + private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType + .values(); + + private static List read(DataInputStream in, int logVersion) + throws IOException { + if (!LayoutVersion.supports(Feature.EXTENDED_ACL, logVersion)) { + return null; + } + + int size = in.readInt(); + if (size == 0) { + return null; + } + + List aclEntries = Lists.newArrayListWithCapacity(size); + for (int i = 0; i < size; ++i) { + int v = in.read(); + int p = v & ACL_EDITLOG_PERM_MASK; + int t = (v >> ACL_EDITLOG_ENTRY_TYPE_OFFSET) + & ACL_EDITLOG_ENTRY_TYPE_MASK; + int s = (v >> ACL_EDITLOG_ENTRY_SCOPE_OFFSET) + & ACL_EDITLOG_ENTRY_SCOPE_MASK; + boolean hasName = ((v >> ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET) & 1) == 1; + String name = hasName ? FSImageSerialization.readString(in) : null; + aclEntries.add(new AclEntry.Builder().setName(name) + .setPermission(FSACTION_VALUES[p]) + .setScope(ACL_ENTRY_SCOPE_VALUES[s]) + .setType(ACL_ENTRY_TYPE_VALUES[t]).build()); + } + + return aclEntries; + } + + private static void write(List aclEntries, DataOutputStream out) + throws IOException { + if (aclEntries == null) { + out.writeInt(0); + return; + } + + out.writeInt(aclEntries.size()); + for (AclEntry e : aclEntries) { + boolean hasName = e.getName() != null; + int v = (e.getScope().ordinal() << ACL_EDITLOG_ENTRY_SCOPE_OFFSET) + | (e.getType().ordinal() << ACL_EDITLOG_ENTRY_TYPE_OFFSET) + | e.getPermission().ordinal(); + + if (hasName) { + v |= 1 << ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET; + } + out.write(v); + if (hasName) { + FSImageSerialization.writeString(e.getName(), out); + } + } + } + } + @SuppressWarnings("unchecked") static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp { int length; @@ -290,6 +379,7 @@ public abstract class FSEditLogOp { long blockSize; Block[] blocks; PermissionStatus permissions; + List aclEntries; String clientName; String clientMachine; @@ -352,6 +442,11 @@ public abstract class FSEditLogOp { return (T)this; } + T setAclEntries(List aclEntries) { + this.aclEntries = aclEntries; + return (T)this; + } + T setClientName(String clientName) { this.clientName = clientName; return (T)this; @@ -374,6 +469,7 @@ public abstract class FSEditLogOp { permissions.write(out); if (this.opCode == OP_ADD) { + AclEditLogUtil.write(aclEntries, out); FSImageSerialization.writeString(clientName,out); FSImageSerialization.writeString(clientMachine,out); // write clientId and callId @@ -432,6 +528,7 @@ public abstract class FSEditLogOp { // clientname, clientMachine and block locations of last block. 
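AclEditLogUtil packs each ACL entry into a single byte: permission ordinal in bits 0-2, type in bits 3-4, scope in bit 5, and a has-name flag in bit 6, with the optional name serialized separately as a string. A round-trip sketch of that layout under the same constants, operating on plain ints rather than the edit-log streams:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

final class AclEditLogByteSketch {
  // Same bit layout as AclEditLogUtil above.
  private static final int HAS_NAME_OFFSET = 6;
  private static final int TYPE_OFFSET = 3;
  private static final int SCOPE_OFFSET = 5;
  private static final int PERM_MASK = 7;
  private static final int TYPE_MASK = 3;
  private static final int SCOPE_MASK = 1;

  static int pack(AclEntry e) {
    int v = (e.getScope().ordinal() << SCOPE_OFFSET)
        | (e.getType().ordinal() << TYPE_OFFSET)
        | e.getPermission().ordinal();
    if (e.getName() != null) {
      v |= 1 << HAS_NAME_OFFSET;            // the name itself is written separately
    }
    return v;
  }

  static AclEntry unpack(int v, String name) {
    boolean hasName = ((v >> HAS_NAME_OFFSET) & 1) == 1;
    return new AclEntry.Builder()
        .setName(hasName ? name : null)
        .setPermission(FsAction.values()[v & PERM_MASK])
        .setScope(AclEntryScope.values()[(v >> SCOPE_OFFSET) & SCOPE_MASK])
        .setType(AclEntryType.values()[(v >> TYPE_OFFSET) & TYPE_MASK])
        .build();
  }
}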
if (this.opCode == OP_ADD) { + aclEntries = AclEditLogUtil.read(in, logVersion); this.clientName = FSImageSerialization.readString(in); this.clientMachine = FSImageSerialization.readString(in); // read clientId and callId @@ -483,6 +580,8 @@ public abstract class FSEditLogOp { builder.append(Arrays.toString(blocks)); builder.append(", permissions="); builder.append(permissions); + builder.append(", aclEntries="); + builder.append(aclEntries); builder.append(", clientName="); builder.append(clientName); builder.append(", clientMachine="); @@ -520,6 +619,9 @@ public abstract class FSEditLogOp { } FSEditLogOp.permissionStatusToXml(contentHandler, permissions); if (this.opCode == OP_ADD) { + if (aclEntries != null) { + appendAclEntriesToXml(contentHandler, aclEntries); + } appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId); } } @@ -545,6 +647,7 @@ public abstract class FSEditLogOp { this.blocks = new Block[0]; } this.permissions = permissionStatusFromXml(st); + aclEntries = readAclEntriesFromXml(st); readRpcIdsFromXml(st); } } @@ -1211,6 +1314,7 @@ public abstract class FSEditLogOp { String path; long timestamp; PermissionStatus permissions; + List aclEntries; private MkdirOp() { super(OP_MKDIR); @@ -1240,6 +1344,11 @@ public abstract class FSEditLogOp { return this; } + MkdirOp setAclEntries(List aclEntries) { + this.aclEntries = aclEntries; + return this; + } + @Override public void writeFields(DataOutputStream out) throws IOException { @@ -1248,6 +1357,7 @@ public abstract class FSEditLogOp { FSImageSerialization.writeLong(timestamp, out); // mtime FSImageSerialization.writeLong(timestamp, out); // atime, unused at this permissions.write(out); + AclEditLogUtil.write(aclEntries, out); } @Override @@ -1285,6 +1395,7 @@ public abstract class FSEditLogOp { } this.permissions = PermissionStatus.read(in); + aclEntries = AclEditLogUtil.read(in, logVersion); } @Override @@ -1300,6 +1411,8 @@ public abstract class FSEditLogOp { builder.append(timestamp); builder.append(", permissions="); builder.append(permissions); + builder.append(", aclEntries="); + builder.append(aclEntries); builder.append(", opCode="); builder.append(opCode); builder.append(", txid="); @@ -1318,6 +1431,9 @@ public abstract class FSEditLogOp { XMLUtils.addSaxString(contentHandler, "TIMESTAMP", Long.valueOf(timestamp).toString()); FSEditLogOp.permissionStatusToXml(contentHandler, permissions); + if (aclEntries != null) { + appendAclEntriesToXml(contentHandler, aclEntries); + } } @Override void fromXml(Stanza st) throws InvalidXmlException { @@ -1326,6 +1442,7 @@ public abstract class FSEditLogOp { this.path = st.getValue("PATH"); this.timestamp = Long.valueOf(st.getValue("TIMESTAMP")); this.permissions = permissionStatusFromXml(st); + aclEntries = readAclEntriesFromXml(st); } } @@ -3338,6 +3455,50 @@ public abstract class FSEditLogOp { } } + static class SetAclOp extends FSEditLogOp { + List aclEntries = Lists.newArrayList(); + String src; + + private SetAclOp() { + super(OP_SET_ACL); + } + + static SetAclOp getInstance() { + return new SetAclOp(); + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + AclEditLogProto p = AclEditLogProto.parseDelimitedFrom((DataInputStream)in); + src = p.getSrc(); + aclEntries = PBHelper.convertAclEntry(p.getEntriesList()); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + AclEditLogProto.Builder b = AclEditLogProto.newBuilder(); + if (src != null) + b.setSrc(src); + 
b.addAllEntries(PBHelper.convertAclEntryProto(aclEntries)); + b.build().writeDelimitedTo(out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "SRC", src); + appendAclEntriesToXml(contentHandler, aclEntries); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + src = st.getValue("SRC"); + aclEntries = readAclEntriesFromXml(st); + if (aclEntries == null) { + aclEntries = Lists.newArrayList(); + } + } + } + static private short readShort(DataInputStream in) throws IOException { return Short.parseShort(FSImageSerialization.readString(in)); } @@ -3747,4 +3908,45 @@ public abstract class FSEditLogOp { short mode = Short.valueOf(st.getValue("MODE")); return new FsPermission(mode); } + + private static void fsActionToXml(ContentHandler contentHandler, FsAction v) + throws SAXException { + XMLUtils.addSaxString(contentHandler, "PERM", v.SYMBOL); + } + + private static FsAction fsActionFromXml(Stanza st) throws InvalidXmlException { + FsAction v = FSACTION_SYMBOL_MAP.get(st.getValue("PERM")); + if (v == null) + throw new InvalidXmlException("Invalid value for FsAction"); + return v; + } + + private static void appendAclEntriesToXml(ContentHandler contentHandler, + List aclEntries) throws SAXException { + for (AclEntry e : aclEntries) { + contentHandler.startElement("", "", "ENTRY", new AttributesImpl()); + XMLUtils.addSaxString(contentHandler, "SCOPE", e.getScope().name()); + XMLUtils.addSaxString(contentHandler, "TYPE", e.getType().name()); + XMLUtils.addSaxString(contentHandler, "NAME", e.getName()); + fsActionToXml(contentHandler, e.getPermission()); + contentHandler.endElement("", "", "ENTRY"); + } + } + + private static List readAclEntriesFromXml(Stanza st) { + List aclEntries = Lists.newArrayList(); + if (!st.hasChildren("ENTRY")) + return null; + + List stanzas = st.getChildren("ENTRY"); + for (Stanza s : stanzas) { + AclEntry e = new AclEntry.Builder() + .setScope(AclEntryScope.valueOf(s.getValue("SCOPE"))) + .setType(AclEntryType.valueOf(s.getValue("TYPE"))) + .setName(s.getValue("NAME")) + .setPermission(fsActionFromXml(s)).build(); + aclEntries.add(e); + } + return aclEntries; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java index 03e2025c7b6..35b184f1db7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java @@ -67,6 +67,7 @@ public enum FSEditLogOpCodes { OP_MODIFY_CACHE_POOL ((byte) 37), OP_REMOVE_CACHE_POOL ((byte) 38), OP_MODIFY_CACHE_DIRECTIVE ((byte) 39), + OP_SET_ACL ((byte) 40), // Note that the current range of the valid OP code is 0~127 OP_INVALID ((byte) -1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 8f16e9c803d..591e4a95e79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -772,12 +772,10 @@ public class FSImageFormat { modificationTime, atime, blocks, 
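Unlike the per-entry byte packing used for OP_ADD and OP_MKDIR, the binary form of OP_SET_ACL is a single length-delimited AclEditLogProto message (defined in the new acl.proto), with entries converted through PBHelper. A sketch of that round trip using byte arrays in place of the edit-log streams; the class name is illustrative only.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

final class SetAclOpWireSketch {
  // Serializes a path and its ACL the same way SetAclOp.writeFields does.
  static byte[] serialize(String src, List<AclEntry> entries) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    AclEditLogProto.newBuilder()
        .setSrc(src)
        .addAllEntries(PBHelper.convertAclEntryProto(entries))
        .build()
        .writeDelimitedTo(out);
    return bytes.toByteArray();
  }

  // Mirrors SetAclOp.readFields: parse the delimited message and convert back.
  static List<AclEntry> deserialize(byte[] data) throws Exception {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    AclEditLogProto p = AclEditLogProto.parseDelimitedFrom(in);
    return PBHelper.convertAclEntry(p.getEntriesList());
  }
}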
replication, blockSize); if (underConstruction) { file.toUnderConstruction(clientName, clientMachine, null); - return fileDiffs == null ? file : new INodeFile(file, fileDiffs); - } else { - return fileDiffs == null ? file : new INodeFile(file, fileDiffs); } - } else if (numBlocks == -1) { - //directory + return fileDiffs == null ? file : new INodeFile(file, fileDiffs); + } else if (numBlocks == -1) { + //directory //read quotas final long nsQuota = in.readLong(); @@ -867,8 +865,8 @@ public class FSImageFormat { final short replication = namesystem.getBlockManager().adjustReplication( in.readShort()); final long preferredBlockSize = in.readLong(); - - return new INodeFileAttributes.SnapshotCopy(name, permissions, modificationTime, + + return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime, accessTime, replication, preferredBlockSize); } @@ -889,9 +887,9 @@ public class FSImageFormat { final long dsQuota = in.readLong(); return nsQuota == -1L && dsQuota == -1L? - new INodeDirectoryAttributes.SnapshotCopy(name, permissions, modificationTime) + new INodeDirectoryAttributes.SnapshotCopy(name, permissions, null, modificationTime) : new INodeDirectoryAttributes.CopyWithQuota(name, permissions, - modificationTime, nsQuota, dsQuota); + null, modificationTime, nsQuota, dsQuota); } private void loadFilesUnderConstruction(DataInput in, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index f28189eb84d..3c3ca49fc4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -30,6 +30,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; @@ -38,15 +42,18 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; +import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.util.ReadOnlyList; import com.google.common.base.Preconditions; +import 
com.google.common.collect.ImmutableList; import com.google.protobuf.ByteString; @InterfaceAudience.Private @@ -56,6 +63,19 @@ public final class FSImageFormatPBINode { private final static int GROUP_STRID_OFFSET = 16; private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class); + private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1; + private static final int ACL_ENTRY_NAME_OFFSET = 6; + private static final int ACL_ENTRY_TYPE_OFFSET = 3; + private static final int ACL_ENTRY_SCOPE_OFFSET = 5; + private static final int ACL_ENTRY_PERM_MASK = 7; + private static final int ACL_ENTRY_TYPE_MASK = 3; + private static final int ACL_ENTRY_SCOPE_MASK = 1; + private static final FsAction[] FSACTION_VALUES = FsAction.values(); + private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope + .values(); + private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType + .values(); + public final static class Loader { public static PermissionStatus loadPermission(long id, final String[] stringTable) { @@ -66,13 +86,30 @@ public final class FSImageFormatPBINode { new FsPermission(perm)); } + public static ImmutableList loadAclEntries( + AclFeatureProto proto, final String[] stringTable) { + ImmutableList.Builder b = ImmutableList.builder(); + for (int v : proto.getEntriesList()) { + int p = v & ACL_ENTRY_PERM_MASK; + int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK; + int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK; + int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK; + String name = stringTable[nid]; + b.add(new AclEntry.Builder().setName(name) + .setPermission(FSACTION_VALUES[p]) + .setScope(ACL_ENTRY_SCOPE_VALUES[s]) + .setType(ACL_ENTRY_TYPE_VALUES[t]).build()); + } + return b.build(); + } + public static INodeDirectory loadINodeDirectory(INodeSection.INode n, - final String[] stringTable) { + LoaderContext state) { assert n.getType() == INodeSection.INode.Type.DIRECTORY; INodeSection.INodeDirectory d = n.getDirectory(); final PermissionStatus permissions = loadPermission(d.getPermission(), - stringTable); + state.getStringTable()); final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName() .toByteArray(), permissions, d.getModificationTime()); @@ -80,6 +117,11 @@ public final class FSImageFormatPBINode { if (nsQuota >= 0 || dsQuota >= 0) { dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota); } + + if (d.hasAcl()) { + dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(), + state.getStringTable()))); + } return dir; } @@ -181,7 +223,7 @@ public final class FSImageFormatPBINode { case FILE: return loadINodeFile(n); case DIRECTORY: - return loadINodeDirectory(n, parent.getLoaderContext().getStringTable()); + return loadINodeDirectory(n, parent.getLoaderContext()); case SYMLINK: return loadINodeSymlink(n); default: @@ -195,6 +237,7 @@ public final class FSImageFormatPBINode { INodeSection.INodeFile f = n.getFile(); List bp = f.getBlocksList(); short replication = (short) f.getReplication(); + LoaderContext state = parent.getLoaderContext(); BlockInfo[] blocks = new BlockInfo[bp.size()]; for (int i = 0, e = bp.size(); i < e; ++i) { @@ -206,6 +249,12 @@ public final class FSImageFormatPBINode { final INodeFile file = new INodeFile(n.getId(), n.getName().toByteArray(), permissions, f.getModificationTime(), f.getAccessTime(), blocks, replication, f.getPreferredBlockSize()); + + if (f.hasAcl()) { + file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(), + state.getStringTable()))); + } + // 
under-construction information if (f.hasFileUC()) { INodeSection.FileUnderConstructionFeature uc = f.getFileUC(); @@ -234,8 +283,7 @@ public final class FSImageFormatPBINode { } private void loadRootINode(INodeSection.INode p) { - INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext() - .getStringTable()); + INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext()); final Quota.Counts q = root.getQuotaCounts(); final long nsQuota = q.get(Quota.NAMESPACE); final long dsQuota = q.get(Quota.DISKSPACE); @@ -257,27 +305,48 @@ public final class FSImageFormatPBINode { | n.getFsPermissionShort(); } + private static AclFeatureProto.Builder buildAclEntries(AclFeature f, + final SaverContext.DeduplicationMap map) { + AclFeatureProto.Builder b = AclFeatureProto.newBuilder(); + for (AclEntry e : f.getEntries()) { + int v = ((map.getId(e.getName()) & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET) + | (e.getType().ordinal() << ACL_ENTRY_TYPE_OFFSET) + | (e.getScope().ordinal() << ACL_ENTRY_SCOPE_OFFSET) + | (e.getPermission().ordinal()); + b.addEntries(v); + } + return b; + } + public static INodeSection.INodeFile.Builder buildINodeFile( - INodeFileAttributes file, - final SaverContext.DeduplicationMap stringMap) { + INodeFileAttributes file, final SaverContext state) { INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder() .setAccessTime(file.getAccessTime()) .setModificationTime(file.getModificationTime()) - .setPermission(buildPermissionStatus(file, stringMap)) + .setPermission(buildPermissionStatus(file, state.getStringMap())) .setPreferredBlockSize(file.getPreferredBlockSize()) .setReplication(file.getFileReplication()); + + AclFeature f = file.getAclFeature(); + if (f != null) { + b.setAcl(buildAclEntries(f, state.getStringMap())); + } return b; } public static INodeSection.INodeDirectory.Builder buildINodeDirectory( - INodeDirectoryAttributes dir, - final SaverContext.DeduplicationMap stringMap) { + INodeDirectoryAttributes dir, final SaverContext state) { Quota.Counts quota = dir.getQuotaCounts(); INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory .newBuilder().setModificationTime(dir.getModificationTime()) .setNsQuota(quota.get(Quota.NAMESPACE)) .setDsQuota(quota.get(Quota.DISKSPACE)) - .setPermission(buildPermissionStatus(dir, stringMap)); + .setPermission(buildPermissionStatus(dir, state.getStringMap())); + + AclFeature f = dir.getAclFeature(); + if (f != null) { + b.setAcl(buildAclEntries(f, state.getStringMap())); + } return b; } @@ -378,7 +447,7 @@ public final class FSImageFormatPBINode { private void save(OutputStream out, INodeDirectory n) throws IOException { INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n, - parent.getSaverContext().getStringMap()); + parent.getSaverContext()); INodeSection.INode r = buildINodeCommon(n) .setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build(); r.writeDelimitedTo(out); @@ -386,7 +455,7 @@ public final class FSImageFormatPBINode { private void save(OutputStream out, INodeFile n) throws IOException { INodeSection.INodeFile.Builder b = buildINodeFile(n, - parent.getSaverContext().getStringMap()); + parent.getSaverContext()); for (Block block : n.getBlocks()) { b.addBlocks(PBHelper.convert(block)); @@ -407,12 +476,14 @@ public final class FSImageFormatPBINode { } private void save(OutputStream out, INodeSymlink n) throws IOException { + SaverContext state = parent.getSaverContext(); INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink .newBuilder() - 
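In the protobuf fsimage, each ACL entry becomes one int: permission ordinal in bits 0-2, type in bits 3-4, scope in bit 5, and the string-table id of the entry name in bits 6-29, so repeated names are deduplicated through the same table used for owner and group names. A sketch of the packing and unpacking under the same masks, using a name id and a plain string array in place of the saver's DeduplicationMap and the loader's string table:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

final class FsImageAclIntSketch {
  // Same layout as FSImageFormatPBINode above.
  private static final int NAME_MASK = (1 << 24) - 1;
  private static final int NAME_OFFSET = 6;
  private static final int TYPE_OFFSET = 3;
  private static final int SCOPE_OFFSET = 5;
  private static final int PERM_MASK = 7;
  private static final int TYPE_MASK = 3;
  private static final int SCOPE_MASK = 1;

  // nameId stands in for SaverContext.DeduplicationMap.getId(name).
  static int pack(AclEntry e, int nameId) {
    return ((nameId & NAME_MASK) << NAME_OFFSET)
        | (e.getType().ordinal() << TYPE_OFFSET)
        | (e.getScope().ordinal() << SCOPE_OFFSET)
        | e.getPermission().ordinal();
  }

  // stringTable stands in for the loader-side string table.
  static AclEntry unpack(int v, String[] stringTable) {
    return new AclEntry.Builder()
        .setName(stringTable[(v >> NAME_OFFSET) & NAME_MASK])
        .setPermission(FsAction.values()[v & PERM_MASK])
        .setScope(AclEntryScope.values()[(v >> SCOPE_OFFSET) & SCOPE_MASK])
        .setType(AclEntryType.values()[(v >> TYPE_OFFSET) & TYPE_MASK])
        .build();
  }
}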
.setPermission(buildPermissionStatus(n, parent.getSaverContext().getStringMap())) + .setPermission(buildPermissionStatus(n, state.getStringMap())) .setTarget(ByteString.copyFrom(n.getSymlink())) .setModificationTime(n.getModificationTime()) .setAccessTime(n.getAccessTime()); + INodeSection.INode r = buildINodeCommon(n) .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build(); r.writeDelimitedTo(out); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index 9edb15a9008..3fb30cca5df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -116,9 +116,11 @@ public final class FSImageFormatProtobuf { return map.entrySet(); } } - private final DeduplicationMap stringMap = DeduplicationMap.newMap(); private final ArrayList refList = Lists.newArrayList(); + private final DeduplicationMap stringMap = DeduplicationMap + .newMap(); + public DeduplicationMap getStringMap() { return stringMap; } @@ -547,6 +549,7 @@ public final class FSImageFormatProtobuf { public enum SectionName { NS_INFO("NS_INFO"), STRING_TABLE("STRING_TABLE"), + EXTENDED_ACL("EXTENDED_ACL"), INODE("INODE"), INODE_REFERENCE("INODE_REFERENCE"), SNAPSHOT("SNAPSHOT"), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index d7fb5c6c02b..2eb4106d43b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -143,6 +143,8 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; @@ -490,7 +492,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats, private INodeId inodeId; private final RetryCache retryCache; - + + private final AclConfigFlag aclConfigFlag; + /** * Set the last allocated inode id when fsimage or editlog is loaded. */ @@ -757,6 +761,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, this.isDefaultAuditLogger = auditLoggers.size() == 1 && auditLoggers.get(0) instanceof DefaultAuditLogger; this.retryCache = ignoreRetryCache ? 
null : initRetryCache(conf); + this.aclConfigFlag = new AclConfigFlag(conf); } catch(IOException e) { LOG.error(getClass().getSimpleName() + " initialization failed.", e); close(); @@ -7365,6 +7370,127 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return results; } + void modifyAclEntries(String src, List aclSpec) throws IOException { + aclConfigFlag.checkForApiCall(); + HdfsFileStatus resultingStat = null; + FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot modify ACL entries on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + checkOwner(pc, src); + dir.modifyAclEntries(src, aclSpec); + resultingStat = getAuditFileInfo(src, false); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "modifyAclEntries", src, null, resultingStat); + } + + void removeAclEntries(String src, List aclSpec) throws IOException { + aclConfigFlag.checkForApiCall(); + HdfsFileStatus resultingStat = null; + FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot remove ACL entries on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + checkOwner(pc, src); + dir.removeAclEntries(src, aclSpec); + resultingStat = getAuditFileInfo(src, false); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "removeAclEntries", src, null, resultingStat); + } + + void removeDefaultAcl(String src) throws IOException { + aclConfigFlag.checkForApiCall(); + HdfsFileStatus resultingStat = null; + FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot remove default ACL entries on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + checkOwner(pc, src); + dir.removeDefaultAcl(src); + resultingStat = getAuditFileInfo(src, false); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "removeDefaultAcl", src, null, resultingStat); + } + + void removeAcl(String src) throws IOException { + aclConfigFlag.checkForApiCall(); + HdfsFileStatus resultingStat = null; + FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot remove ACL on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + checkOwner(pc, src); + dir.removeAcl(src); + resultingStat = getAuditFileInfo(src, false); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "removeAcl", src, null, resultingStat); + } + + void setAcl(String src, List aclSpec) throws IOException { + aclConfigFlag.checkForApiCall(); + HdfsFileStatus resultingStat = null; + FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + 
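Every ACL RPC handler above starts with aclConfigFlag.checkForApiCall(). AclConfigFlag's implementation is not part of this hunk, so the following is only a hedged sketch of the kind of guard it provides, assuming the dfs.namenode.acls.enabled key added to DFSConfigKeys and hdfs-default.xml in this patch; the default value and message text are illustrative, not quoted from the source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.AclException;

// Hedged sketch only: the real AclConfigFlag is added elsewhere in this patch
// and may differ in naming, defaults, and message text.
final class AclConfigFlagSketch {
  private final boolean enabled;

  AclConfigFlagSketch(Configuration conf) {
    // Key taken from hdfs-default.xml in this patch; default assumed to be false.
    this.enabled = conf.getBoolean("dfs.namenode.acls.enabled", false);
  }

  void checkForApiCall() throws AclException {
    if (!enabled) {
      throw new AclException("The ACL operation has been rejected because "
          + "support for ACLs is disabled (dfs.namenode.acls.enabled=false).");
    }
  }
}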
try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot set ACL on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + checkOwner(pc, src); + dir.setAcl(src, aclSpec); + resultingStat = getAuditFileInfo(src, false); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "setAcl", src, null, resultingStat); + } + + AclStatus getAclStatus(String src) throws IOException { + aclConfigFlag.checkForApiCall(); + FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.READ); + readLock(); + try { + checkOperation(OperationCategory.READ); + if (isPermissionEnabled) { + checkPermission(pc, src, false, null, null, null, null); + } + return dir.getAclStatus(src); + } finally { + readUnlock(); + } + } + /** * Default AuditLogger implementation; used when no access logger is * defined in the config file. It can also be explicitly listed in the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 098fc17393a..0e10273951a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -20,16 +20,21 @@ package org.apache.hadoop.hdfs.server.namenode; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.Stack; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.StringUtils; /** * Class that helps in checking file system permission. @@ -42,12 +47,27 @@ class FSPermissionChecker { static final Log LOG = LogFactory.getLog(UserGroupInformation.class); /** @return a string for throwing {@link AccessControlException} */ - private static String toAccessControlString(INode inode) { - return "\"" + inode.getFullPathName() + "\":" - + inode.getUserName() + ":" + inode.getGroupName() - + ":" + (inode.isDirectory()? "d": "-") + inode.getFsPermission(); + private String toAccessControlString(INode inode, int snapshotId, + FsAction access, FsPermission mode) { + return toAccessControlString(inode, snapshotId, access, mode, null); } + /** @return a string for throwing {@link AccessControlException} */ + private String toAccessControlString(INode inode, int snapshotId, + FsAction access, FsPermission mode, List featureEntries) { + StringBuilder sb = new StringBuilder("Permission denied: ") + .append("user=").append(user).append(", ") + .append("access=").append(access).append(", ") + .append("inode=\"").append(inode.getFullPathName()).append("\":") + .append(inode.getUserName(snapshotId)).append(':') + .append(inode.getGroupName(snapshotId)).append(':') + .append(inode.isDirectory() ? 
'd' : '-') + .append(mode); + if (featureEntries != null) { + sb.append(':').append(StringUtils.join(",", featureEntries)); + } + return sb.toString(); + } private final UserGroupInformation ugi; private final String user; @@ -219,7 +239,20 @@ class FSPermissionChecker { return; } FsPermission mode = inode.getFsPermission(snapshotId); + AclFeature aclFeature = inode.getAclFeature(snapshotId); + if (aclFeature != null) { + List featureEntries = aclFeature.getEntries(); + // It's possible that the inode has a default ACL but no access ACL. + if (featureEntries.get(0).getScope() == AclEntryScope.ACCESS) { + checkAccessAcl(inode, snapshotId, access, mode, featureEntries); + return; + } + } + checkFsPermission(inode, snapshotId, access, mode); + } + private void checkFsPermission(INode inode, int snapshotId, FsAction access, + FsPermission mode) throws AccessControlException { if (user.equals(inode.getUserName(snapshotId))) { //user class if (mode.getUserAction().implies(access)) { return; } } @@ -229,8 +262,88 @@ class FSPermissionChecker { else { //other class if (mode.getOtherAction().implies(access)) { return; } } - throw new AccessControlException("Permission denied: user=" + user - + ", access=" + access + ", inode=" + toAccessControlString(inode)); + throw new AccessControlException( + toAccessControlString(inode, snapshotId, access, mode)); + } + + /** + * Checks requested access against an Access Control List. This method relies + * on finding the ACL data in the relevant portions of {@link FsPermission} and + * {@link AclFeature} as implemented in the logic of {@link AclStorage}. This + * method also relies on receiving the ACL entries in sorted order. This is + * assumed to be true, because the ACL modification methods in + * {@link AclTransformation} sort the resulting entries. + * + * More specifically, this method depends on these invariants in an ACL: + * - The list must be sorted. + * - Each entry in the list must be unique by scope + type + name. + * - There is exactly one each of the unnamed user/group/other entries. + * - The mask entry must not have a name. + * - The other entry must not have a name. + * - Default entries may be present, but they are ignored during enforcement. + * + * @param inode INode accessed inode + * @param snapshotId int snapshot ID + * @param access FsAction requested permission + * @param mode FsPermission mode from inode + * @param featureEntries List ACL entries from AclFeature of inode + * @throws AccessControlException if the ACL denies permission + */ + private void checkAccessAcl(INode inode, int snapshotId, FsAction access, + FsPermission mode, List featureEntries) + throws AccessControlException { + boolean foundMatch = false; + + // Use owner entry from permission bits if user is owner. + if (user.equals(inode.getUserName(snapshotId))) { + if (mode.getUserAction().implies(access)) { + return; + } + foundMatch = true; + } + + // Check named user and group entries if user was not denied by owner entry. + if (!foundMatch) { + for (AclEntry entry: featureEntries) { + if (entry.getScope() == AclEntryScope.DEFAULT) { + break; + } + AclEntryType type = entry.getType(); + String name = entry.getName(); + if (type == AclEntryType.USER) { + // Use named user entry with mask from permission bits applied if user + // matches name. 
+ if (user.equals(name)) { + FsAction masked = entry.getPermission().and(mode.getGroupAction()); + if (masked.implies(access)) { + return; + } + foundMatch = true; + } + } else if (type == AclEntryType.GROUP) { + // Use group entry (unnamed or named) with mask from permission bits + // applied if user is a member and entry grants access. If user is a + // member of multiple groups that have entries that grant access, then + // it doesn't matter which is chosen, so exit early after first match. + String group = name == null ? inode.getGroupName(snapshotId) : name; + if (groups.contains(group)) { + FsAction masked = entry.getPermission().and(mode.getGroupAction()); + if (masked.implies(access)) { + return; + } + foundMatch = true; + } + } + } + } + + // Use other entry if user was not denied by an earlier match. + if (!foundMatch && mode.getOtherAction().implies(access)) { + return; + } + + throw new AccessControlException( + toAccessControlString(inode, snapshotId, access, mode, featureEntries)); } /** Guarded by {@link FSNamesystem#readLock()} */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 5efb2a7e04e..9242812bb7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -154,6 +154,31 @@ public abstract class INode implements INodeAttributes, Diff.Element { return nodeToUpdate; } + abstract AclFeature getAclFeature(int snapshotId); + + @Override + public final AclFeature getAclFeature() { + return getAclFeature(Snapshot.CURRENT_STATE_ID); + } + + abstract void addAclFeature(AclFeature aclFeature); + + final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId) + throws QuotaExceededException { + final INode nodeToUpdate = recordModification(latestSnapshotId); + nodeToUpdate.addAclFeature(aclFeature); + return nodeToUpdate; + } + + abstract void removeAclFeature(); + + final INode removeAclFeature(int latestSnapshotId) + throws QuotaExceededException { + final INode nodeToUpdate = recordModification(latestSnapshotId); + nodeToUpdate.removeAclFeature(); + return nodeToUpdate; + } + /** * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID}, * return this; otherwise return the corresponding snapshot inode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java index 90ee39f5da3..b6c9b981082 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java @@ -48,6 +48,9 @@ public interface INodeAttributes { /** @return the permission information as a long. */ public long getPermissionLong(); + /** @return the ACL feature. */ + public AclFeature getAclFeature(); + /** @return the modification time. 
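When an AclFeature is present, the group bits of the inode's FsPermission act as the mask: named-user and group entries are ANDed with getGroupAction() before the implies() check, exactly as in checkAccessAcl above. A small worked example of that masking, assuming an inode whose permission bits are 640 (rw-r-----) and an ACL entry user:bruce:rwx; the values are illustrative.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

final class AclMaskSketch {
  public static void main(String[] args) {
    // Permission bits as stored once an ACL is attached: the "group" slot holds the mask.
    FsPermission mode = new FsPermission((short) 0640);      // rw-r-----
    FsAction entryPerm = FsAction.ALL;                       // ACL entry user:bruce:rwx
    FsAction requested = FsAction.WRITE;

    // Same computation as checkAccessAcl for a matching named-user entry.
    FsAction masked = entryPerm.and(mode.getGroupAction());  // rwx & r-- = r--
    System.out.println("effective = " + masked.SYMBOL);                  // r--
    System.out.println("write allowed = " + masked.implies(requested));  // false

    // Because the named entry matched the user, foundMatch is set and the
    // unmasked "other" bits are never consulted; the request is denied.
  }
}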
*/ public long getModificationTime(); @@ -58,13 +61,15 @@ public interface INodeAttributes { public static abstract class SnapshotCopy implements INodeAttributes { private final byte[] name; private final long permission; + private final AclFeature aclFeature; private final long modificationTime; private final long accessTime; SnapshotCopy(byte[] name, PermissionStatus permissions, - long modificationTime, long accessTime) { + AclFeature aclFeature, long modificationTime, long accessTime) { this.name = name; this.permission = PermissionStatusFormat.toLong(permissions); + this.aclFeature = aclFeature; this.modificationTime = modificationTime; this.accessTime = accessTime; } @@ -72,6 +77,7 @@ public interface INodeAttributes { SnapshotCopy(INode inode) { this.name = inode.getLocalNameBytes(); this.permission = inode.getPermissionLong(); + this.aclFeature = inode.getAclFeature(); this.modificationTime = inode.getModificationTime(); this.accessTime = inode.getAccessTime(); } @@ -108,6 +114,11 @@ public interface INodeAttributes { return permission; } + @Override + public AclFeature getAclFeature() { + return aclFeature; + } + @Override public final long getModificationTime() { return modificationTime; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index f9a06f1e5bd..f981db129ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -77,8 +77,11 @@ public class INodeDirectory extends INodeWithAdditionalFields * @param other The INodeDirectory to be copied * @param adopt Indicate whether or not need to set the parent field of child * INodes to the new node + * @param featuresToCopy any number of features to copy to the new node. + * The method will do a reference copy, not a deep copy. */ - public INodeDirectory(INodeDirectory other, boolean adopt, boolean copyFeatures) { + public INodeDirectory(INodeDirectory other, boolean adopt, + Feature... featuresToCopy) { super(other); this.children = other.children; if (adopt && this.children != null) { @@ -86,9 +89,7 @@ public class INodeDirectory extends INodeWithAdditionalFields child.setParent(this); } } - if (copyFeatures) { - this.features = other.features; - } + this.features = featuresToCopy; } /** @return true unconditionally. */ @@ -145,12 +146,7 @@ public class INodeDirectory extends INodeWithAdditionalFields * otherwise, return null. */ public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() { - for (Feature f : features) { - if (f instanceof DirectoryWithQuotaFeature) { - return (DirectoryWithQuotaFeature)f; - } - } - return null; + return getFeature(DirectoryWithQuotaFeature.class); } /** Is this directory with quota? */ @@ -185,12 +181,7 @@ public class INodeDirectory extends INodeWithAdditionalFields * otherwise, return null. */ public final DirectoryWithSnapshotFeature getDirectoryWithSnapshotFeature() { - for (Feature f : features) { - if (f instanceof DirectoryWithSnapshotFeature) { - return (DirectoryWithSnapshotFeature) f; - } - } - return null; + return getFeature(DirectoryWithSnapshotFeature.class); } /** Is this file has the snapshot feature? 
*/ @@ -231,7 +222,8 @@ public class INodeDirectory extends INodeWithAdditionalFields public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) { Preconditions.checkState(getClass() != INodeDirectory.class, "the class is already INodeDirectory, this=%s", this); - return replaceSelf(new INodeDirectory(this, true, true), inodeMap); + return replaceSelf(new INodeDirectory(this, true, this.getFeatures()), + inodeMap); } /** Replace itself with the given directory. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java index b0ea44bd811..861e85226d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java @@ -35,8 +35,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes { public static class SnapshotCopy extends INodeAttributes.SnapshotCopy implements INodeDirectoryAttributes { public SnapshotCopy(byte[] name, PermissionStatus permissions, - long modificationTime) { - super(name, permissions, modificationTime, 0L); + AclFeature aclFeature, long modificationTime) { + super(name, permissions, aclFeature, modificationTime, 0L); } public SnapshotCopy(INodeDirectory dir) { @@ -62,8 +62,9 @@ public interface INodeDirectoryAttributes extends INodeAttributes { public CopyWithQuota(byte[] name, PermissionStatus permissions, - long modificationTime, long nsQuota, long dsQuota) { - super(name, permissions, modificationTime); + AclFeature aclFeature, long modificationTime, long nsQuota, + long dsQuota) { + super(name, permissions, aclFeature, modificationTime); this.nsQuota = nsQuota; this.dsQuota = dsQuota; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 80abb5268dc..831ab21b7a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -151,12 +151,7 @@ public class INodeFile extends INodeWithAdditionalFields * otherwise, return null. */ public final FileUnderConstructionFeature getFileUnderConstructionFeature() { - for (Feature f : features) { - if (f instanceof FileUnderConstructionFeature) { - return (FileUnderConstructionFeature) f; - } - } - return null; + return getFeature(FileUnderConstructionFeature.class); } /** Is this file under construction? */ @@ -265,12 +260,7 @@ public class INodeFile extends INodeWithAdditionalFields * otherwise, return null. */ public final FileWithSnapshotFeature getFileWithSnapshotFeature() { - for (Feature f: features) { - if (f instanceof FileWithSnapshotFeature) { - return (FileWithSnapshotFeature) f; - } - } - return null; + return getFeature(FileWithSnapshotFeature.class); } /** Is this file has the snapshot feature? 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java index e9e2e872063..7e656f17f15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java @@ -41,9 +41,9 @@ public interface INodeFileAttributes extends INodeAttributes { private final long header; public SnapshotCopy(byte[] name, PermissionStatus permissions, - long modificationTime, long accessTime, + AclFeature aclFeature, long modificationTime, long accessTime, short replication, long preferredBlockSize) { - super(name, permissions, modificationTime, accessTime); + super(name, permissions, aclFeature, modificationTime, accessTime); final long h = HeaderFormat.combineReplication(0L, replication); header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index a1e3c116a78..bf3be5b85e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -213,6 +213,22 @@ public abstract class INodeReference extends INode { public final FsPermission getFsPermission(int snapshotId) { return referred.getFsPermission(snapshotId); } + + @Override + final AclFeature getAclFeature(int snapshotId) { + return referred.getAclFeature(snapshotId); + } + + @Override + final void addAclFeature(AclFeature aclFeature) { + referred.addAclFeature(aclFeature); + } + + @Override + final void removeAclFeature() { + referred.removeAclFeature(); + } + @Override public final short getFsPermissionShort() { return referred.getFsPermissionShort(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java index d9fdc41a84e..77f9bde78d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; +import org.apache.hadoop.hdfs.server.namenode.INode.Feature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.util.LightWeightGSet.LinkedElement; @@ -219,6 +220,15 @@ public abstract class INodeWithAdditionalFields extends INode return permission; } + @Override + final AclFeature getAclFeature(int snapshotId) { + if (snapshotId != Snapshot.CURRENT_STATE_ID) { + return getSnapshotINode(snapshotId).getAclFeature(); + } + + return getFeature(AclFeature.class); + } + @Override final long getModificationTime(int snapshotId) { if (snapshotId != 
Snapshot.CURRENT_STATE_ID) { @@ -305,4 +315,33 @@ public abstract class INodeWithAdditionalFields extends INode + f.getClass().getSimpleName() + " not found."); features = arr; } + + protected T getFeature(Class clazz) { + for (Feature f : features) { + if (f.getClass() == clazz) { + @SuppressWarnings("unchecked") + T ret = (T) f; + return ret; + } + } + return null; + } + + public void removeAclFeature() { + AclFeature f = getAclFeature(); + Preconditions.checkNotNull(f); + removeFeature(f); + } + + public void addAclFeature(AclFeature f) { + AclFeature f1 = getAclFeature(); + if (f1 != null) + throw new IllegalStateException("Duplicated ACLFeature"); + + addFeature(f); + } + + public final Feature[] getFeatures() { + return features; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 3eecc104c6d..515689f8330 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -49,6 +49,8 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.ha.HAServiceStatus; @@ -1287,5 +1289,37 @@ class NameNodeRpcServer implements NamenodeProtocols { throws IOException { return namesystem.listCachePools(prevKey != null ? prevKey : ""); } + + @Override + public void modifyAclEntries(String src, List aclSpec) + throws IOException { + namesystem.modifyAclEntries(src, aclSpec); + } + + @Override + public void removeAclEntries(String src, List aclSpec) + throws IOException { + namesystem.removeAclEntries(src, aclSpec); + } + + @Override + public void removeDefaultAcl(String src) throws IOException { + namesystem.removeDefaultAcl(src); + } + + @Override + public void removeAcl(String src) throws IOException { + namesystem.removeAcl(src); + } + + @Override + public void setAcl(String src, List aclSpec) throws IOException { + namesystem.setAcl(src, aclSpec); + } + + @Override + public AclStatus getAclStatus(String src) throws IOException { + return namesystem.getAclStatus(src); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java new file mode 100644 index 00000000000..d841d3689a4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; + +/** + * Groups a list of ACL entries into separate lists for access entries vs. + * default entries. + */ +@InterfaceAudience.Private +final class ScopedAclEntries { + private static final int PIVOT_NOT_FOUND = -1; + + private final List accessEntries; + private final List defaultEntries; + + /** + * Creates a new ScopedAclEntries from the given list. It is assumed that the + * list is already sorted such that all access entries precede all default + * entries. + * + * @param aclEntries List to separate + */ + public ScopedAclEntries(List aclEntries) { + int pivot = calculatePivotOnDefaultEntries(aclEntries); + if (pivot != PIVOT_NOT_FOUND) { + accessEntries = pivot != 0 ? aclEntries.subList(0, pivot) : + Collections.emptyList(); + defaultEntries = aclEntries.subList(pivot, aclEntries.size()); + } else { + accessEntries = aclEntries; + defaultEntries = Collections.emptyList(); + } + } + + /** + * Returns access entries. + * + * @return List containing just access entries, or an empty list if + * there are no access entries + */ + public List getAccessEntries() { + return accessEntries; + } + + /** + * Returns default entries. + * + * @return List containing just default entries, or an empty list if + * there are no default entries + */ + public List getDefaultEntries() { + return defaultEntries; + } + + /** + * Returns the pivot point in the list between the access entries and the + * default entries. This is the index of the first element in the list that is + * a default entry. 
+ * + * @param aclBuilder ArrayList containing entries to build + * @return int pivot point, or -1 if list contains no default entries + */ + private static int calculatePivotOnDefaultEntries(List aclBuilder) { + for (int i = 0; i < aclBuilder.size(); ++i) { + if (aclBuilder.get(i).getScope() == AclEntryScope.DEFAULT) { + return i; + } + } + return PIVOT_NOT_FOUND; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java index 86f2daaf0ae..2a7b242ceb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java @@ -36,8 +36,11 @@ import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.server.namenode.AclFeature; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; +import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf; +import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary; @@ -154,7 +157,7 @@ public class FSImageFormatPBSnapshot { SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot .parseDelimitedFrom(in); INodeDirectory root = loadINodeDirectory(pbs.getRoot(), - parent.getLoaderContext().getStringTable()); + parent.getLoaderContext()); int sid = pbs.getSnapshotId(); INodeDirectorySnapshottable parent = (INodeDirectorySnapshottable) fsDir .getInode(root.getId()).asDirectory(); @@ -197,6 +200,7 @@ public class FSImageFormatPBSnapshot { private void loadFileDiffList(InputStream in, INodeFile file, int size) throws IOException { final FileDiffList diffs = new FileDiffList(); + final LoaderContext state = parent.getLoaderContext(); for (int i = 0; i < size; i++) { SnapshotDiffSection.FileDiff pbf = SnapshotDiffSection.FileDiff .parseDelimitedFrom(in); @@ -204,10 +208,16 @@ public class FSImageFormatPBSnapshot { if (pbf.hasSnapshotCopy()) { INodeSection.INodeFile fileInPb = pbf.getSnapshotCopy(); PermissionStatus permission = loadPermission( - fileInPb.getPermission(), parent.getLoaderContext() - .getStringTable()); + fileInPb.getPermission(), state.getStringTable()); + + AclFeature acl = null; + if (fileInPb.hasAcl()) { + acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries( + fileInPb.getAcl(), state.getStringTable())); + } + copy = new INodeFileAttributes.SnapshotCopy(pbf.getName() - .toByteArray(), permission, fileInPb.getModificationTime(), + .toByteArray(), permission, acl, fileInPb.getModificationTime(), fileInPb.getAccessTime(), (short) fileInPb.getReplication(), fileInPb.getPreferredBlockSize()); } @@ -277,6 +287,8 @@ public class FSImageFormatPBSnapshot { dir.addSnapshotFeature(null); } DirectoryDiffList diffs = dir.getDiffs(); + final LoaderContext state = parent.getLoaderContext(); + for (int i = 0; i < size; i++) { // load a directory diff SnapshotDiffSection.DirectoryDiff diffInPb = SnapshotDiffSection. 
@@ -292,15 +304,22 @@ public class FSImageFormatPBSnapshot { INodeSection.INodeDirectory dirCopyInPb = diffInPb.getSnapshotCopy(); final byte[] name = diffInPb.getName().toByteArray(); PermissionStatus permission = loadPermission( - dirCopyInPb.getPermission(), parent.getLoaderContext() - .getStringTable()); + dirCopyInPb.getPermission(), state.getStringTable()); + AclFeature acl = null; + if (dirCopyInPb.hasAcl()) { + acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries( + dirCopyInPb.getAcl(), state.getStringTable())); + } + long modTime = dirCopyInPb.getModificationTime(); boolean noQuota = dirCopyInPb.getNsQuota() == -1 && dirCopyInPb.getDsQuota() == -1; + copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name, - permission, modTime) + permission, acl, modTime) : new INodeDirectoryAttributes.CopyWithQuota(name, permission, - modTime, dirCopyInPb.getNsQuota(), dirCopyInPb.getDsQuota()); + acl, modTime, dirCopyInPb.getNsQuota(), + dirCopyInPb.getDsQuota()); } // load created list List clist = loadCreatedList(in, dir, @@ -355,7 +374,7 @@ public class FSImageFormatPBSnapshot { SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot .newBuilder().setSnapshotId(s.getId()); INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot, - parent.getSaverContext().getStringMap()); + parent.getSaverContext()); INodeSection.INode r = INodeSection.INode.newBuilder() .setId(sroot.getId()) .setType(INodeSection.INode.Type.DIRECTORY) @@ -443,7 +462,7 @@ public class FSImageFormatPBSnapshot { INodeFileAttributes copy = diff.snapshotINode; if (copy != null) { fb.setName(ByteString.copyFrom(copy.getLocalNameBytes())) - .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext().getStringMap())); + .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext())); } fb.build().writeDelimitedTo(out); } @@ -480,7 +499,7 @@ public class FSImageFormatPBSnapshot { if (!diff.isSnapshotRoot() && copy != null) { db.setName(ByteString.copyFrom(copy.getLocalNameBytes())) .setSnapshotCopy( - buildINodeDirectory(copy, parent.getSaverContext().getStringMap())); + buildINodeDirectory(copy, parent.getSaverContext())); } // process created list and deleted list List created = diff.getChildrenDiff() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java index 7e1863f9538..184aa877385 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java @@ -184,7 +184,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory { private int snapshotQuota = SNAPSHOT_LIMIT; public INodeDirectorySnapshottable(INodeDirectory dir) { - super(dir, true, true); + super(dir, true, dir.getFeatures()); // add snapshot feature if the original directory does not have it if (!isWithSnapshot()) { addSnapshotFeature(null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java index ba23439abe0..f4bba7c429f 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java @@ -21,6 +21,7 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.text.SimpleDateFormat; +import java.util.Arrays; import java.util.Comparator; import java.util.Date; @@ -28,12 +29,16 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.namenode.AclFeature; import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.util.ReadOnlyList; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; + /** Snapshot of a sub-tree in the namesystem. */ @InterfaceAudience.Private public class Snapshot implements Comparable { @@ -139,7 +144,10 @@ public class Snapshot implements Comparable { /** The root directory of the snapshot. */ static public class Root extends INodeDirectory { Root(INodeDirectory other) { - super(other, false, false); + // Always preserve ACL. + super(other, false, Lists.newArrayList( + Iterables.filter(Arrays.asList(other.getFeatures()), AclFeature.class)) + .toArray(new Feature[0])); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 162b2b39588..ec2685f9dd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -53,6 +53,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -71,6 +72,7 @@ import org.apache.hadoop.hdfs.web.ParamFilter; import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.resources.AccessTimeParam; +import org.apache.hadoop.hdfs.web.resources.AclPermissionParam; import org.apache.hadoop.hdfs.web.resources.BlockSizeParam; import org.apache.hadoop.hdfs.web.resources.BufferSizeParam; import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam; @@ -320,12 +322,14 @@ public class NamenodeWebHdfsMethods { @QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT) final CreateParentParam createParent, @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT) - final TokenArgumentParam delegationTokenArgument - ) throws IOException, InterruptedException { + final TokenArgumentParam delegationTokenArgument, + @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT) + final 
AclPermissionParam aclPermission + )throws IOException, InterruptedException { return put(ugi, delegation, username, doAsUser, ROOT, op, destination, owner, group, permission, overwrite, bufferSize, replication, blockSize, modificationTime, accessTime, renameOptions, createParent, - delegationTokenArgument); + delegationTokenArgument,aclPermission); } /** Handle HTTP PUT request. */ @@ -369,12 +373,14 @@ public class NamenodeWebHdfsMethods { @QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT) final CreateParentParam createParent, @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT) - final TokenArgumentParam delegationTokenArgument + final TokenArgumentParam delegationTokenArgument, + @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT) + final AclPermissionParam aclPermission ) throws IOException, InterruptedException { init(ugi, delegation, username, doAsUser, path, op, destination, owner, group, permission, overwrite, bufferSize, replication, blockSize, - modificationTime, accessTime, renameOptions, delegationTokenArgument); + modificationTime, accessTime, renameOptions, delegationTokenArgument,aclPermission); return ugi.doAs(new PrivilegedExceptionAction() { @Override @@ -385,7 +391,7 @@ public class NamenodeWebHdfsMethods { path.getAbsolutePath(), op, destination, owner, group, permission, overwrite, bufferSize, replication, blockSize, modificationTime, accessTime, renameOptions, createParent, - delegationTokenArgument); + delegationTokenArgument,aclPermission); } finally { REMOTE_ADDRESS.set(null); } @@ -412,7 +418,8 @@ public class NamenodeWebHdfsMethods { final AccessTimeParam accessTime, final RenameOptionSetParam renameOptions, final CreateParentParam createParent, - final TokenArgumentParam delegationTokenArgument + final TokenArgumentParam delegationTokenArgument, + final AclPermissionParam aclPermission ) throws IOException, URISyntaxException { final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF); @@ -492,6 +499,26 @@ public class NamenodeWebHdfsMethods { np.cancelDelegationToken(token); return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } + case MODIFYACLENTRIES: { + np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true)); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); + } + case REMOVEACLENTRIES: { + np.removeAclEntries(fullpath, aclPermission.getAclPermission(false)); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); + } + case REMOVEDEFAULTACL: { + np.removeDefaultAcl(fullpath); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); + } + case REMOVEACL: { + np.removeAcl(fullpath); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); + } + case SETACL: { + np.setAcl(fullpath, aclPermission.getAclPermission(true)); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); + } default: throw new UnsupportedOperationException(op + " is not supported"); } @@ -732,6 +759,15 @@ public class NamenodeWebHdfsMethods { WebHdfsFileSystem.getHomeDirectoryString(ugi)); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } + case GETACLSTATUS: { + AclStatus status = np.getAclStatus(fullpath); + if (status == null) { + throw new FileNotFoundException("File does not exist: " + fullpath); + } + + final String js = JsonUtil.toJsonString(status); + return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + } default: throw new 
UnsupportedOperationException(op + " is not supported"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index fab49274367..afae1a3e28f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.web; import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.*; @@ -616,4 +618,44 @@ public class JsonUtil { return checksum; } + /** Convert a AclStatus object to a Json string. */ + public static String toJsonString(final AclStatus status) { + if (status == null) { + return null; + } + + final Map m = new TreeMap(); + m.put("owner", status.getOwner()); + m.put("group", status.getGroup()); + m.put("stickyBit", status.isStickyBit()); + m.put("entries", status.getEntries()); + final Map> finalMap = + new TreeMap>(); + finalMap.put(AclStatus.class.getSimpleName(), m); + return JSON.toString(finalMap); + } + + /** Convert a Json map to a AclStatus object. */ + public static AclStatus toAclStatus(final Map json) { + if (json == null) { + return null; + } + + final Map m = (Map) json.get(AclStatus.class.getSimpleName()); + + AclStatus.Builder aclStatusBuilder = new AclStatus.Builder(); + aclStatusBuilder.owner((String) m.get("owner")); + aclStatusBuilder.group((String) m.get("group")); + aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit")); + + final Object[] entries = (Object[]) m.get("entries"); + + List aclEntryList = new ArrayList(); + for (int i = 0; i < entries.length; i++) { + AclEntry aclEntry = AclEntry.parseAclEntry((String) entries[i], true); + aclEntryList.add(aclEntry); + } + aclStatusBuilder.addEntries(aclEntryList); + return aclStatusBuilder.build(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 51eefabda28..491bc5ac411 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -49,6 +49,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; @@ -57,6 +59,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.hdfs.web.resources.AccessTimeParam; +import org.apache.hadoop.hdfs.web.resources.AclPermissionParam; import org.apache.hadoop.hdfs.web.resources.BlockSizeParam; import org.apache.hadoop.hdfs.web.resources.BufferSizeParam; import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam; @@ -697,6 +700,17 @@ public 
class WebHdfsFileSystem extends FileSystem f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory())); } + @Override + public AclStatus getAclStatus(Path f) throws IOException { + final HttpOpParam.Op op = GetOpParam.Op.GETACLSTATUS; + final Map json = run(op, f); + AclStatus status = JsonUtil.toAclStatus(json); + if (status == null) { + throw new FileNotFoundException("File does not exist: " + f); + } + return status; + } + @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { statistics.incrementWriteOps(1); @@ -757,6 +771,44 @@ public class WebHdfsFileSystem extends FileSystem run(op, p, new PermissionParam(permission)); } + @Override + public void modifyAclEntries(Path path, List aclSpec) + throws IOException { + statistics.incrementWriteOps(1); + final HttpOpParam.Op op = PutOpParam.Op.MODIFYACLENTRIES; + run(op, path, new AclPermissionParam(aclSpec)); + } + + @Override + public void removeAclEntries(Path path, List aclSpec) + throws IOException { + statistics.incrementWriteOps(1); + final HttpOpParam.Op op = PutOpParam.Op.REMOVEACLENTRIES; + run(op, path, new AclPermissionParam(aclSpec)); + } + + @Override + public void removeDefaultAcl(Path path) throws IOException { + statistics.incrementWriteOps(1); + final HttpOpParam.Op op = PutOpParam.Op.REMOVEDEFAULTACL; + run(op, path); + } + + @Override + public void removeAcl(Path path) throws IOException { + statistics.incrementWriteOps(1); + final HttpOpParam.Op op = PutOpParam.Op.REMOVEACL; + run(op, path); + } + + @Override + public void setAcl(final Path p, final List aclSpec) + throws IOException { + statistics.incrementWriteOps(1); + final HttpOpParam.Op op = PutOpParam.Op.SETACL; + run(op, p, new AclPermissionParam(aclSpec)); + } + @Override public boolean setReplication(final Path p, final short replication ) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java new file mode 100644 index 00000000000..b335379b1bb --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web.resources; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT; + +import java.util.List; +import java.util.regex.Pattern; + +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.commons.lang.StringUtils; + +/** AclPermission parameter. */ +public class AclPermissionParam extends StringParam { + /** Parameter name. 
*/ + public static final String NAME = "aclspec"; + /** Default parameter value. */ + public static final String DEFAULT = ""; + + private static Domain DOMAIN = new Domain(NAME, + Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT)); + + /** + * Constructor. + * + * @param str a string representation of the parameter value. + */ + public AclPermissionParam(final String str) { + super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str); + } + + public AclPermissionParam(List acl) { + super(DOMAIN,parseAclSpec(acl).equals(DEFAULT) ? null : parseAclSpec(acl)); + } + + @Override + public String getName() { + return NAME; + } + + public List getAclPermission(boolean includePermission) { + final String v = getValue(); + return (v != null ? AclEntry.parseAclSpec(v, includePermission) : AclEntry + .parseAclSpec(DEFAULT, includePermission)); + } + + /** + * Parse the list of AclEntry and returns aclspec. + * + * @param List + * @return String + */ + private static String parseAclSpec(List aclEntry) { + return StringUtils.join(aclEntry, ","); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java index 916fe553ac4..3c6d47b0f43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java @@ -35,6 +35,7 @@ public class GetOpParam extends HttpOpParam { /** GET_BLOCK_LOCATIONS is a private unstable op. */ GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK), + GETACLSTATUS(false, HttpURLConnection.HTTP_OK), NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java index 6ee84c4ccf4..c3b45310a88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java @@ -37,6 +37,12 @@ public class PutOpParam extends HttpOpParam { RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true), CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true), + MODIFYACLENTRIES(false, HttpURLConnection.HTTP_OK), + REMOVEACLENTRIES(false, HttpURLConnection.HTTP_OK), + REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK), + REMOVEACL(false, HttpURLConnection.HTTP_OK), + SETACL(false, HttpURLConnection.HTTP_OK), + NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); final boolean doOutputAndRedirect; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index c7a6465f11f..58326016c88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -30,6 +30,7 @@ package hadoop.hdfs; import "Security.proto"; import "hdfs.proto"; +import "acl.proto"; /** * The ClientNamenodeProtocol Service defines the interface between a client @@ -719,4 +720,16 @@ service ClientNamenodeProtocol { returns(GetSnapshotDiffReportResponseProto); rpc isFileClosed(IsFileClosedRequestProto) 
returns(IsFileClosedResponseProto); + rpc modifyAclEntries(ModifyAclEntriesRequestProto) + returns(ModifyAclEntriesResponseProto); + rpc removeAclEntries(RemoveAclEntriesRequestProto) + returns(RemoveAclEntriesResponseProto); + rpc removeDefaultAcl(RemoveDefaultAclRequestProto) + returns(RemoveDefaultAclResponseProto); + rpc removeAcl(RemoveAclRequestProto) + returns(RemoveAclResponseProto); + rpc setAcl(SetAclRequestProto) + returns(SetAclResponseProto); + rpc getAclStatus(GetAclStatusRequestProto) + returns(GetAclStatusResponseProto); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto new file mode 100644 index 00000000000..e940142e339 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.hdfs.protocol.proto"; +option java_outer_classname = "AclProtos"; +option java_generate_equals_and_hash = true; +package hadoop.hdfs; + +import "hdfs.proto"; + +message AclEntryProto { + enum AclEntryScopeProto { + ACCESS = 0x0; + DEFAULT = 0x1; + } + + enum AclEntryTypeProto { + USER = 0x0; + GROUP = 0x1; + MASK = 0x2; + OTHER = 0x3; + } + + enum FsActionProto { + NONE = 0x0; + EXECUTE = 0x1; + WRITE = 0x2; + WRITE_EXECUTE = 0x3; + READ = 0x4; + READ_EXECUTE = 0x5; + READ_WRITE = 0x6; + PERM_ALL = 0x7; + } + + required AclEntryTypeProto type = 1; + required AclEntryScopeProto scope = 2; + required FsActionProto permissions = 3; + optional string name = 4; +} + +message AclStatusProto { + required string owner = 1; + required string group = 2; + required bool sticky = 3; + repeated AclEntryProto entries = 4; +} + +message AclEditLogProto { + required string src = 1; + repeated AclEntryProto entries = 2; +} + +message ModifyAclEntriesRequestProto { + required string src = 1; + repeated AclEntryProto aclSpec = 2; +} + +message ModifyAclEntriesResponseProto { +} + +message RemoveAclRequestProto { + required string src = 1; +} + +message RemoveAclResponseProto { +} + +message RemoveAclEntriesRequestProto { + required string src = 1; + repeated AclEntryProto aclSpec = 2; +} + +message RemoveAclEntriesResponseProto { +} + +message RemoveDefaultAclRequestProto { + required string src = 1; +} + +message RemoveDefaultAclResponseProto { +} + +message SetAclRequestProto { + required string src = 1; + repeated AclEntryProto aclSpec = 2; +} + +message SetAclResponseProto { +} + +message GetAclStatusRequestProto { + required string src = 1; +} + +message GetAclStatusResponseProto { + required AclStatusProto result = 1; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto index 92c7b555150..8dd087752fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto @@ -22,6 +22,7 @@ option java_outer_classname = "FsImageProto"; package hadoop.hdfs.fsimage; import "hdfs.proto"; +import "acl.proto"; /** * This file defines the on-disk layout of the file system image. The @@ -88,6 +89,23 @@ message INodeSection { optional string clientMachine = 2; } + message AclFeatureProto { + /** + * An ACL entry is represented by a 32-bit integer in Big Endian + * format. The bits can be divided in four segments: + * [0:2) || [2:26) || [26:27) || [27:29) || [29:32) + * + * [0:2) -- reserved for futute uses. + * [2:26) -- the name of the entry, which is an ID that points to a + * string in the StringTableSection. + * [26:27) -- the scope of the entry (AclEntryScopeProto) + * [27:29) -- the type of the entry (AclEntryTypeProto) + * [29:32) -- the permission of the entry (FsActionProto) + * + */ + repeated fixed32 entries = 2 [packed = true]; + } + message INodeFile { optional uint32 replication = 1; optional uint64 modificationTime = 2; @@ -96,6 +114,7 @@ message INodeSection { optional fixed64 permission = 5; repeated BlockProto blocks = 6; optional FileUnderConstructionFeature fileUC = 7; + optional AclFeatureProto acl = 8; } message INodeDirectory { @@ -105,6 +124,7 @@ message INodeSection { // diskspace quota optional uint64 dsQuota = 3; optional fixed64 permission = 4; + optional AclFeatureProto acl = 5; } message INodeSymlink { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 89b4dc6c954..3419045dafe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -363,6 +363,16 @@ --> + + dfs.namenode.acls.enabled + false + + Set to true to enable support for HDFS ACLs (Access Control Lists). By + default, ACLs are disabled. When ACLs are disabled, the NameNode rejects + all RPCs related to setting or getting ACLs. + + + dfs.block.access.token.enable false diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm index 7ab08ced7af..e0a387f4929 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm @@ -47,6 +47,10 @@ HDFS Permissions Guide client process, and its group is the group of the parent directory (the BSD rule). + HDFS also provides optional support for POSIX ACLs (Access Control Lists) to + augment file permissions with finer-grained rules for specific named users or + named groups. ACLs are discussed in greater detail later in this document. + Each client process that accesses HDFS has a two-part identity composed of the user name, and groups list. Whenever HDFS must do a permissions check for a file or directory foo accessed by a client process, @@ -219,9 +223,173 @@ HDFS Permissions Guide identity matches the super-user, parts of the name space may be inaccessible to the web server. +* ACLs (Access Control Lists) + + In addition to the traditional POSIX permissions model, HDFS also supports + POSIX ACLs (Access Control Lists). 
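The AclFeatureProto comment above pins down a 32-bit big-endian layout for each serialized ACL entry. A minimal sketch of how such a word could be packed and unpacked (the helper names here are hypothetical; only the bit widths are taken from the comment above):

public class AclEntryPackingSketch {
  // [2:26) name id, [26:27) scope, [27:29) type, [29:32) permission,
  // where bit 0 is the most significant bit of the 32-bit word.
  static int pack(int nameId, int scope, int type, int perm) {
    return (nameId & 0xFFFFFF) << 6 | (scope & 0x1) << 5
        | (type & 0x3) << 3 | (perm & 0x7);
  }
  static int permission(int entry) { return entry & 0x7; }              // 3 low bits
  static int type(int entry)       { return (entry >>> 3) & 0x3; }      // 2 bits
  static int scope(int entry)      { return (entry >>> 5) & 0x1; }      // 1 bit
  static int nameId(int entry)     { return (entry >>> 6) & 0xFFFFFF; } // 24 bits

  public static void main(String[] args) {
    // A named-user ACCESS entry with rwx permissions and string-table id 42.
    int packed = pack(42, 0 /* ACCESS */, 0 /* USER */, 7 /* rwx */);
    System.out.println(nameId(packed) + " " + permission(packed));      // 42 7
  }
}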
ACLs are useful for implementing + permission requirements that differ from the natural organizational hierarchy + of users and groups. An ACL provides a way to set different permissions for + specific named users or named groups, not only the file's owner and the + file's group. + + By default, support for ACLs is disabled, and the NameNode disallows creation + of ACLs. To enable support for ACLs, set <<>> to + true in the NameNode configuration. + + An ACL consists of a set of ACL entries. Each ACL entry names a specific + user or group and grants or denies read, write and execute permissions for + that specific user or group. For example: + ++-- + user::rw- + user:bruce:rwx #effective:r-- + group::r-x #effective:r-- + group:sales:rwx #effective:r-- + mask::r-- + other::r-- ++-- + + ACL entries consist of a type, an optional name and a permission string. + For display purposes, ':' is used as the delimiter between each field. In + this example ACL, the file owner has read-write access, the file group has + read-execute access and others have read access. So far, this is equivalent + to setting the file's permission bits to 654. + + Additionally, there are 2 extended ACL entries for the named user bruce and + the named group sales, both granted full access. The mask is a special ACL + entry that filters the permissions granted to all named user entries and + named group entries, and also the unnamed group entry. In the example, the + mask has only read permissions, and we can see that the effective permissions + of several ACL entries have been filtered accordingly. + + Every ACL must have a mask. If the user doesn't supply a mask while setting + an ACL, then a mask is inserted automatically by calculating the union of + permissions on all entries that would be filtered by the mask. + + Running <<>> on a file that has an ACL actually changes the + permissions of the mask. Since the mask acts as a filter, this effectively + constrains the permissions of all extended ACL entries instead of changing + just the group entry and possibly missing other extended ACL entries. + + The model also differentiates between an "access ACL", which defines the + rules to enforce during permission checks, and a "default ACL", which defines + the ACL entries that new child files or sub-directories receive automatically + during creation. For example: + ++-- + user::rwx + group::r-x + other::r-x + default:user::rwx + default:user:bruce:rwx #effective:r-x + default:group::r-x + default:group:sales:rwx #effective:r-x + default:mask::r-x + default:other::r-x ++-- + + Only directories may have a default ACL. When a new file or sub-directory is + created, it automatically copies the default ACL of its parent into its own + access ACL. A new sub-directory also copies it to its own default ACL. In + this way, the default ACL will be copied down through arbitrarily deep levels + of the file system tree as new sub-directories get created. + + The exact permission values in the new child's access ACL are subject to + filtering by the mode parameter. Considering the default umask of 022, this + is typically 755 for new directories and 644 for new files. The mode + parameter filters the copied permission values for the unnamed user (file + owner), the mask and other. Using this particular example ACL, and creating + a new sub-directory with 755 for the mode, this mode filtering has no effect + on the final result. 
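For the mask filtering illustrated in the example ACL above, the "#effective" value reported by getfacl is simply the entry's permission ANDed with the mask entry. A small sketch, assuming FsAction's and() helper:

import org.apache.hadoop.fs.permission.FsAction;

public class EffectivePermissionSketch {
  public static void main(String[] args) {
    FsAction named = FsAction.ALL;          // user:bruce:rwx
    FsAction mask = FsAction.READ;          // mask::r--
    FsAction effective = named.and(mask);   // r--, shown as "#effective:r--"
    System.out.println(effective.SYMBOL);
  }
}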
However, if we consider creation of a file with 644 for + the mode, then mode filtering causes the new file's ACL to receive read-write + for the unnamed user (file owner), read for the mask and read for others. + This mask also means that effective permissions for named user bruce and + named group sales are only read. + + Note that the copy occurs at time of creation of the new file or + sub-directory. Subsequent changes to the parent's default ACL do not change + existing children. + + The default ACL must have all minimum required ACL entries, including the + unnamed user (file owner), unnamed group (file group) and other entries. If + the user doesn't supply one of these entries while setting a default ACL, + then the entries are inserted automatically by copying the corresponding + permissions from the access ACL, or permission bits if there is no access + ACL. The default ACL also must have mask. As described above, if the mask + is unspecified, then a mask is inserted automatically by calculating the + union of permissions on all entries that would be filtered by the mask. + + When considering a file that has an ACL, the algorithm for permission checks + changes to: + + * If the user name matches the owner of file, then the owner + permissions are tested; + + * Else if the user name matches the name in one of the named user entries, + then these permissions are tested, filtered by the mask permissions; + + * Else if the group of file matches any member of the groups list, + and if these permissions filtered by the mask grant access, then these + permissions are used; + + * Else if there is a named group entry matching a member of the groups list, + and if these permissions filtered by the mask grant access, then these + permissions are used; + + * Else if the file group or any named group entry matches a member of the + groups list, but access was not granted by any of those permissions, then + access is denied; + + * Otherwise the other permissions of file are tested. + + Best practice is to rely on traditional permission bits to implement most + permission requirements, and define a smaller number of ACLs to augment the + permission bits with a few exceptional rules. A file with an ACL incurs an + additional cost in memory in the NameNode compared to a file that has only + permission bits. + +* ACLs File System API + + New methods: + + * << aclSpec) throws + IOException;>>> + + * << aclSpec) throws + IOException;>>> + + * <<>> + + * <<>> + + * << aclSpec) throws + IOException;>>> + + * <<>> + +* ACLs Shell Commands + + * << >>> + + Displays the Access Control Lists (ACLs) of files and directories. If a + directory has a default ACL, then getfacl also displays the default ACL. + + * <<} ]|[--set ] >>> + + Sets Access Control Lists (ACLs) of files and directories. + + * << >>> + + The output of <<>> will append a '+' character to the permissions + string of any file or directory that has an ACL. + + See the {{{../hadoop-common/FileSystemShell.html}File System Shell}} + documentation for full coverage of these commands. + * Configuration Parameters - * <<>> + * <<>> If yes use the permissions system as described here. If no, permission checking is turned off, but all other behavior is @@ -255,3 +423,9 @@ HDFS Permissions Guide The administrators for the cluster specified as an ACL. This controls who can access the default servlets, etc. in the HDFS. + + * <<>> + + Set to true to enable support for HDFS ACLs (Access Control Lists). By + default, ACLs are disabled. 
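As a usage sketch of the new FileSystem methods listed under "ACLs File System API" above: the path, user and group names below are made up, and it assumes the default filesystem is HDFS with dfs.namenode.acls.enabled set to true.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

public class AclApiSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/data/sales-report");            // hypothetical path
    // Parse a shell-style aclspec (permissions included) into AclEntry objects
    // and merge the entries into the file's existing ACL.
    List<AclEntry> aclSpec =
        AclEntry.parseAclSpec("user:bruce:rwx,group:sales:r-x", true);
    fs.modifyAclEntries(path, aclSpec);
    // Read the ACL back: owner, group, sticky bit and the full list of entries.
    AclStatus status = fs.getAclStatus(path);
    for (AclEntry entry : status.getEntries()) {
      System.out.println(entry);
    }
  }
}

Removing the extra entries again, or the whole ACL, follows the same pattern through removeAclEntries, removeDefaultAcl and removeAcl.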
When ACLs are disabled, the NameNode rejects + all attempts to set an ACL. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm index cf6293c4913..fbfd880c1f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm @@ -752,6 +752,148 @@ Content-Length: 0 {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setTimes +** {Modify ACL Entries} + + * Submit a HTTP PUT request. + ++--------------------------------- +curl -i -X PUT "http://:/webhdfs/v1/?op=MODIFYACLENTRIES + &aclspec=" ++--------------------------------- + + The client receives a response with zero content length: + ++--------------------------------- +HTTP/1.1 200 OK +Content-Length: 0 ++--------------------------------- + + [] + + See also: + {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.modifyAclEntries + + +** {Remove ACL Entries} + + * Submit a HTTP PUT request. + ++--------------------------------- +curl -i -X PUT "http://:/webhdfs/v1/?op=REMOVEACLENTRIES + &aclspec=" ++--------------------------------- + + The client receives a response with zero content length: + ++--------------------------------- +HTTP/1.1 200 OK +Content-Length: 0 ++--------------------------------- + + [] + + See also: + {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAclEntries + + +** {Remove Default ACL} + + * Submit a HTTP PUT request. + ++--------------------------------- +curl -i -X PUT "http://:/webhdfs/v1/?op=REMOVEDEFAULTACL" ++--------------------------------- + + The client receives a response with zero content length: + ++--------------------------------- +HTTP/1.1 200 OK +Content-Length: 0 ++--------------------------------- + + [] + + See also: + {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeDefaultAcl + + +** {Remove ACL} + + * Submit a HTTP PUT request. + ++--------------------------------- +curl -i -X PUT "http://:/webhdfs/v1/?op=REMOVEACL" ++--------------------------------- + + The client receives a response with zero content length: + ++--------------------------------- +HTTP/1.1 200 OK +Content-Length: 0 ++--------------------------------- + + [] + + See also: + {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAcl + + +** {Set ACL} + + * Submit a HTTP PUT request. + ++--------------------------------- +curl -i -X PUT "http://:/webhdfs/v1/?op=SETACL + &aclspec=" ++--------------------------------- + + The client receives a response with zero content length: + ++--------------------------------- +HTTP/1.1 200 OK +Content-Length: 0 ++--------------------------------- + + [] + + See also: + {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setAcl + + +** {Get ACL Status} + + * Submit a HTTP GET request. 
+ ++--------------------------------- +curl -i -X PUT "http://:/webhdfs/v1/?op=GETACLSTATUS" ++--------------------------------- + + The client receives a response with a {{{ACL Status JSON Schema}<<>> JSON object}}: + ++--------------------------------- +HTTP/1.1 200 OK +Content-Type: application/json +Transfer-Encoding: chunked + +{ + "AclStatus": { + "entries": [ + "user:carla:rw-", + "group::r-x" + ], + "group": "supergroup", + "owner": "hadoop", + "stickyBit": false + } +} ++--------------------------------- + + [] + + See also: + {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus + + * {Delegation Token Operations} ** {Get Delegation Token} @@ -980,6 +1122,52 @@ Transfer-Encoding: chunked However, if additional properties are included in the responses, they are considered as optional properties in order to maintain compatibility. +** {ACL Status JSON Schema} + ++--------------------------------- +{ + "name" : "AclStatus", + "properties": + { + "AclStatus": + { + "type" : "object", + "properties": + { + "entries": + { + "type": "array" + "items": + { + "description": "ACL entry.", + "type": "string" + } + }, + "group": + { + "description": "The group owner.", + "type" : "string", + "required" : true + }, + "owner": + { + "description": "The user who is the owner.", + "type" : "string", + "required" : true + }, + "stickyBit": + { + "description": "True if the sticky bit is on.", + "type" : "boolean", + "required" : true + }, + } + } + } +} ++--------------------------------- + + ** {Boolean JSON Schema} +--------------------------------- @@ -1387,6 +1575,23 @@ var tokenProperties = * {HTTP Query Parameter Dictionary} +** {ACL Spec} + +*----------------+-------------------------------------------------------------------+ +|| Name | <<>> | +*----------------+-------------------------------------------------------------------+ +|| Description | The ACL spec included in ACL modification operations. | +*----------------+-------------------------------------------------------------------+ +|| Type | String | +*----------------+-------------------------------------------------------------------+ +|| Default Value | \ | +*----------------+-------------------------------------------------------------------+ +|| Valid Values | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. | +*----------------+-------------------------------------------------------------------+ +|| Syntax | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. | +*----------------+-------------------------------------------------------------------+ + + ** {Access Time} *----------------+-------------------------------------------------------------------+ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java new file mode 100644 index 00000000000..02207e60535 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.cli; + +import org.apache.hadoop.cli.util.CLICommand; +import org.apache.hadoop.cli.util.CommandExecutor.Result; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestAclCLI extends CLITestHelperDFS { + private MiniDFSCluster cluster = null; + private FileSystem fs = null; + private String namenode = null; + private String username = null; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + fs = cluster.getFileSystem(); + namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///"); + username = System.getProperty("user.name"); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + + @Override + protected String getTestFile() { + return "testAclCLI.xml"; + } + + @Override + protected String expandCommand(final String cmd) { + String expCmd = cmd; + expCmd = expCmd.replaceAll("NAMENODE", namenode); + expCmd = expCmd.replaceAll("USERNAME", username); + expCmd = expCmd.replaceAll("#LF#", + System.getProperty("line.separator")); + expCmd = super.expandCommand(expCmd); + return expCmd; + } + + @Override + protected Result execute(CLICommand cmd) throws Exception { + return cmd.getExecutor(namenode).executeCommand(cmd.getCmd()); + } + + @Test + @Override + public void testAll() { + super.testAll(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java index e473b0a3082..849bfa202de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java @@ -17,15 +17,21 @@ */ package org.apache.hadoop.fs.permission; +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.util.Arrays; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -33,8 +39,12 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import 
org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; public class TestStickyBit { @@ -43,56 +53,89 @@ public class TestStickyBit { UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"}); static UserGroupInformation user2 = UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"}); - + + private static MiniDFSCluster cluster; + private static Configuration conf; + private static FileSystem hdfs; + private static FileSystem hdfsAsUser1; + private static FileSystem hdfsAsUser2; + + @BeforeClass + public static void init() throws Exception { + conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + initCluster(true); + } + + private static void initCluster(boolean format) throws Exception { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format) + .build(); + hdfs = cluster.getFileSystem(); + assertTrue(hdfs instanceof DistributedFileSystem); + hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf); + assertTrue(hdfsAsUser1 instanceof DistributedFileSystem); + hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf); + assertTrue(hdfsAsUser2 instanceof DistributedFileSystem); + } + + @Before + public void setup() throws Exception { + if (hdfs != null) { + for (FileStatus stat: hdfs.listStatus(new Path("/"))) { + hdfs.delete(stat.getPath(), true); + } + } + } + + @AfterClass + public static void shutdown() throws Exception { + IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2); + if (cluster != null) { + cluster.shutdown(); + } + } + /** * Ensure that even if a file is in a directory with the sticky bit on, * another user can write to that file (assuming correct permissions). */ - private void confirmCanAppend(Configuration conf, FileSystem hdfs, - Path baseDir) throws IOException, InterruptedException { - // Create a tmp directory with wide-open permissions and sticky bit - Path p = new Path(baseDir, "tmp"); - - hdfs.mkdirs(p); - hdfs.setPermission(p, new FsPermission((short) 01777)); - + private void confirmCanAppend(Configuration conf, Path p) throws Exception { // Write a file to the new tmp directory as a regular user - hdfs = DFSTestUtil.getFileSystemAs(user1, conf); Path file = new Path(p, "foo"); - writeFile(hdfs, file); - hdfs.setPermission(file, new FsPermission((short) 0777)); + writeFile(hdfsAsUser1, file); + hdfsAsUser1.setPermission(file, new FsPermission((short) 0777)); // Log onto cluster as another user and attempt to append to file - hdfs = DFSTestUtil.getFileSystemAs(user2, conf); Path file2 = new Path(p, "foo"); - FSDataOutputStream h = hdfs.append(file2); - h.write("Some more data".getBytes()); - h.close(); + FSDataOutputStream h = null; + try { + h = hdfsAsUser2.append(file2); + h.write("Some more data".getBytes()); + h.close(); + h = null; + } finally { + IOUtils.cleanup(null, h); + } } /** * Test that one user can't delete another user's file when the sticky bit is * set. 
*/ - private void confirmDeletingFiles(Configuration conf, FileSystem hdfs, - Path baseDir) throws IOException, InterruptedException { - Path p = new Path(baseDir, "contemporary"); - hdfs.mkdirs(p); - hdfs.setPermission(p, new FsPermission((short) 01777)); - + private void confirmDeletingFiles(Configuration conf, Path p) + throws Exception { // Write a file to the new temp directory as a regular user - hdfs = DFSTestUtil.getFileSystemAs(user1, conf); Path file = new Path(p, "foo"); - writeFile(hdfs, file); + writeFile(hdfsAsUser1, file); // Make sure the correct user is the owner - assertEquals(user1.getShortUserName(), hdfs.getFileStatus(file).getOwner()); + assertEquals(user1.getShortUserName(), + hdfsAsUser1.getFileStatus(file).getOwner()); // Log onto cluster as another user and attempt to delete the file - FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf); - try { - hdfs2.delete(file, false); + hdfsAsUser2.delete(file, false); fail("Shouldn't be able to delete someone else's file with SB on"); } catch (IOException ioe) { assertTrue(ioe instanceof AccessControlException); @@ -105,13 +148,8 @@ public class TestStickyBit { * on, the new directory does not automatically get a sticky bit, as is * standard Unix behavior */ - private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir) + private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path p) throws IOException { - Path p = new Path(baseDir, "scissorsisters"); - - // Turn on its sticky bit - hdfs.mkdirs(p, new FsPermission((short) 01666)); - // Create a subdirectory within it Path p2 = new Path(p, "bar"); hdfs.mkdirs(p2); @@ -123,23 +161,19 @@ public class TestStickyBit { /** * Test basic ability to get and set sticky bits on files and directories. */ - private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir) + private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir) throws IOException { - Path p1 = new Path(baseDir, "roguetraders"); - - hdfs.mkdirs(p1); - // Initially sticky bit should not be set - assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit()); + assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit()); // Same permission, but with sticky bit on short withSB; - withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000); + withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000); assertTrue((new FsPermission(withSB)).getStickyBit()); - hdfs.setPermission(p1, new FsPermission(withSB)); - assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit()); + hdfs.setPermission(p, new FsPermission(withSB)); + assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit()); // Write a file to the fs, try to set its sticky bit Path f = new Path(baseDir, "somefile"); @@ -154,37 +188,78 @@ public class TestStickyBit { } @Test - public void testGeneralSBBehavior() throws IOException, InterruptedException { - MiniDFSCluster cluster = null; - try { - Configuration conf = new HdfsConfiguration(); - conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); + public void testGeneralSBBehavior() throws Exception { + Path baseDir = new Path("/mcgann"); + hdfs.mkdirs(baseDir); - FileSystem hdfs = cluster.getFileSystem(); + // Create a tmp directory with wide-open permissions and sticky bit + Path p = new Path(baseDir, "tmp"); - assertTrue(hdfs instanceof DistributedFileSystem); + hdfs.mkdirs(p); + hdfs.setPermission(p, new FsPermission((short) 
01777)); - Path baseDir = new Path("/mcgann"); - hdfs.mkdirs(baseDir); - confirmCanAppend(conf, hdfs, baseDir); + confirmCanAppend(conf, p); - baseDir = new Path("/eccleston"); - hdfs.mkdirs(baseDir); - confirmSettingAndGetting(hdfs, baseDir); + baseDir = new Path("/eccleston"); + hdfs.mkdirs(baseDir); + p = new Path(baseDir, "roguetraders"); - baseDir = new Path("/tennant"); - hdfs.mkdirs(baseDir); - confirmDeletingFiles(conf, hdfs, baseDir); + hdfs.mkdirs(p); + confirmSettingAndGetting(hdfs, p, baseDir); - baseDir = new Path("/smith"); - hdfs.mkdirs(baseDir); - confirmStickyBitDoesntPropagate(hdfs, baseDir); + baseDir = new Path("/tennant"); + hdfs.mkdirs(baseDir); + p = new Path(baseDir, "contemporary"); + hdfs.mkdirs(p); + hdfs.setPermission(p, new FsPermission((short) 01777)); + confirmDeletingFiles(conf, p); - } finally { - if (cluster != null) - cluster.shutdown(); - } + baseDir = new Path("/smith"); + hdfs.mkdirs(baseDir); + p = new Path(baseDir, "scissorsisters"); + + // Turn on its sticky bit + hdfs.mkdirs(p, new FsPermission((short) 01666)); + confirmStickyBitDoesntPropagate(hdfs, baseDir); + } + + @Test + public void testAclGeneralSBBehavior() throws Exception { + Path baseDir = new Path("/mcgann"); + hdfs.mkdirs(baseDir); + + // Create a tmp directory with wide-open permissions and sticky bit + Path p = new Path(baseDir, "tmp"); + + hdfs.mkdirs(p); + hdfs.setPermission(p, new FsPermission((short) 01777)); + applyAcl(p); + confirmCanAppend(conf, p); + + baseDir = new Path("/eccleston"); + hdfs.mkdirs(baseDir); + p = new Path(baseDir, "roguetraders"); + + hdfs.mkdirs(p); + applyAcl(p); + confirmSettingAndGetting(hdfs, p, baseDir); + + baseDir = new Path("/tennant"); + hdfs.mkdirs(baseDir); + p = new Path(baseDir, "contemporary"); + hdfs.mkdirs(p); + hdfs.setPermission(p, new FsPermission((short) 01777)); + applyAcl(p); + confirmDeletingFiles(conf, p); + + baseDir = new Path("/smith"); + hdfs.mkdirs(baseDir); + p = new Path(baseDir, "scissorsisters"); + + // Turn on its sticky bit + hdfs.mkdirs(p, new FsPermission((short) 01666)); + applyAcl(p); + confirmStickyBitDoesntPropagate(hdfs, p); } /** @@ -192,46 +267,42 @@ public class TestStickyBit { * bit is set. 
*/ @Test - public void testMovingFiles() throws IOException, InterruptedException { - MiniDFSCluster cluster = null; + public void testMovingFiles() throws Exception { + testMovingFiles(false); + } + @Test + public void testAclMovingFiles() throws Exception { + testMovingFiles(true); + } + + private void testMovingFiles(boolean useAcl) throws Exception { + // Create a tmp directory with wide-open permissions and sticky bit + Path tmpPath = new Path("/tmp"); + Path tmpPath2 = new Path("/tmp2"); + hdfs.mkdirs(tmpPath); + hdfs.mkdirs(tmpPath2); + hdfs.setPermission(tmpPath, new FsPermission((short) 01777)); + if (useAcl) { + applyAcl(tmpPath); + } + hdfs.setPermission(tmpPath2, new FsPermission((short) 01777)); + if (useAcl) { + applyAcl(tmpPath2); + } + + // Write a file to the new tmp directory as a regular user + Path file = new Path(tmpPath, "foo"); + + writeFile(hdfsAsUser1, file); + + // Log onto cluster as another user and attempt to move the file try { - // Set up cluster for testing - Configuration conf = new HdfsConfiguration(); - conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); - FileSystem hdfs = cluster.getFileSystem(); - - assertTrue(hdfs instanceof DistributedFileSystem); - - // Create a tmp directory with wide-open permissions and sticky bit - Path tmpPath = new Path("/tmp"); - Path tmpPath2 = new Path("/tmp2"); - hdfs.mkdirs(tmpPath); - hdfs.mkdirs(tmpPath2); - hdfs.setPermission(tmpPath, new FsPermission((short) 01777)); - hdfs.setPermission(tmpPath2, new FsPermission((short) 01777)); - - // Write a file to the new tmp directory as a regular user - Path file = new Path(tmpPath, "foo"); - - FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf); - - writeFile(hdfs2, file); - - // Log onto cluster as another user and attempt to move the file - FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf); - - try { - hdfs3.rename(file, new Path(tmpPath2, "renamed")); - fail("Shouldn't be able to rename someone else's file with SB on"); - } catch (IOException ioe) { - assertTrue(ioe instanceof AccessControlException); - assertTrue(ioe.getMessage().contains("sticky bit")); - } - } finally { - if (cluster != null) - cluster.shutdown(); + hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed")); + fail("Shouldn't be able to rename someone else's file with SB on"); + } catch (IOException ioe) { + assertTrue(ioe instanceof AccessControlException); + assertTrue(ioe.getMessage().contains("sticky bit")); } } @@ -241,56 +312,91 @@ public class TestStickyBit { * re-start. */ @Test - public void testStickyBitPersistence() throws IOException { - MiniDFSCluster cluster = null; - try { - Configuration conf = new HdfsConfiguration(); - conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); - FileSystem hdfs = cluster.getFileSystem(); + public void testStickyBitPersistence() throws Exception { + // A tale of three directories... + Path sbSet = new Path("/Housemartins"); + Path sbNotSpecified = new Path("/INXS"); + Path sbSetOff = new Path("/Easyworld"); - assertTrue(hdfs instanceof DistributedFileSystem); + for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff }) + hdfs.mkdirs(p); - // A tale of three directories... - Path sbSet = new Path("/Housemartins"); - Path sbNotSpecified = new Path("/INXS"); - Path sbSetOff = new Path("/Easyworld"); + // Two directories had there sticky bits set explicitly... 
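+ // (01777 is octal: the leading 1 is the sticky bit on top of 0777, while
+ // 00777 leaves it clear, so only sbSet should still report
+ // getStickyBit() == true after the cluster restart below.)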
+ hdfs.setPermission(sbSet, new FsPermission((short) 01777)); + hdfs.setPermission(sbSetOff, new FsPermission((short) 00777)); - for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff }) - hdfs.mkdirs(p); + shutdown(); - // Two directories had there sticky bits set explicitly... - hdfs.setPermission(sbSet, new FsPermission((short) 01777)); - hdfs.setPermission(sbSetOff, new FsPermission((short) 00777)); + // Start file system up again + initCluster(false); - cluster.shutdown(); + assertTrue(hdfs.exists(sbSet)); + assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit()); - // Start file system up again - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build(); - hdfs = cluster.getFileSystem(); + assertTrue(hdfs.exists(sbNotSpecified)); + assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission() + .getStickyBit()); - assertTrue(hdfs.exists(sbSet)); - assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit()); + assertTrue(hdfs.exists(sbSetOff)); + assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit()); + } - assertTrue(hdfs.exists(sbNotSpecified)); - assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission() - .getStickyBit()); + @Test + public void testAclStickyBitPersistence() throws Exception { + // A tale of three directories... + Path sbSet = new Path("/Housemartins"); + Path sbNotSpecified = new Path("/INXS"); + Path sbSetOff = new Path("/Easyworld"); - assertTrue(hdfs.exists(sbSetOff)); - assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit()); + for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff }) + hdfs.mkdirs(p); - } finally { - if (cluster != null) - cluster.shutdown(); - } + // Two directories had there sticky bits set explicitly... + hdfs.setPermission(sbSet, new FsPermission((short) 01777)); + applyAcl(sbSet); + hdfs.setPermission(sbSetOff, new FsPermission((short) 00777)); + applyAcl(sbSetOff); + + shutdown(); + + // Start file system up again + initCluster(false); + + assertTrue(hdfs.exists(sbSet)); + assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit()); + + assertTrue(hdfs.exists(sbNotSpecified)); + assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission() + .getStickyBit()); + + assertTrue(hdfs.exists(sbSetOff)); + assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit()); } /*** * Write a quick file to the specified file system at specified path */ static private void writeFile(FileSystem hdfs, Path p) throws IOException { - FSDataOutputStream o = hdfs.create(p); - o.write("some file contents".getBytes()); - o.close(); + FSDataOutputStream o = null; + try { + o = hdfs.create(p); + o.write("some file contents".getBytes()); + o.close(); + o = null; + } finally { + IOUtils.cleanup(null, o); + } + } + + /** + * Applies an ACL (both access and default) to the given path. 
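+ * The entries grant user2 ALL permissions through both an access entry and a
+ * default entry, so the testAcl* variants exercise directories whose effective
+ * permissions include ACL entries in addition to the mode bits.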
+ * + * @param p Path to set + * @throws IOException if an ACL could not be modified + */ + private static void applyAcl(Path p) throws IOException { + hdfs.modifyAclEntries(p, Arrays.asList( + aclEntry(ACCESS, USER, user2.getShortUserName(), ALL), + aclEntry(DEFAULT, USER, user2.getShortUserName(), ALL))); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 43eedd6f268..29d7042d72e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs; import com.google.common.base.Charsets; import com.google.common.base.Joiner; +import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; @@ -29,6 +30,7 @@ import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; @@ -1095,6 +1097,8 @@ public class DFSTestUtil { filesystem.removeCacheDirective(id); // OP_REMOVE_CACHE_POOL filesystem.removeCachePool("pool1"); + // OP_SET_ACL + filesystem.setAcl(pathConcatTarget, Lists. newArrayList()); } public static void abortStream(DFSOutputStream out) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 7aaff5a04ee..59e66b413a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; @@ -65,6 +66,7 @@ public class TestSafeMode { public void startUp() throws IOException { conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs = cluster.getFileSystem(); @@ -328,12 +330,48 @@ public class TestSafeMode { fs.setTimes(file1, 0, 0); }}); + runFsFun("modifyAclEntries while in SM", new FSRun() { + @Override + public void run(FileSystem fs) throws IOException { + fs.modifyAclEntries(file1, Lists.newArrayList()); + }}); + + runFsFun("removeAclEntries while in SM", new FSRun() { + @Override + public void run(FileSystem fs) throws IOException { + fs.removeAclEntries(file1, Lists.newArrayList()); + }}); + + runFsFun("removeDefaultAcl while in SM", new FSRun() { + @Override + public void run(FileSystem fs) throws IOException { + fs.removeDefaultAcl(file1); + }}); + + runFsFun("removeAcl while in SM", new FSRun() { + @Override + 
public void run(FileSystem fs) throws IOException { + fs.removeAcl(file1); + }}); + + runFsFun("setAcl while in SM", new FSRun() { + @Override + public void run(FileSystem fs) throws IOException { + fs.setAcl(file1, Lists.newArrayList()); + }}); + try { DFSTestUtil.readFile(fs, file1); } catch (IOException ioe) { fail("Set times failed while in SM"); } + try { + fs.getAclStatus(file1); + } catch (IOException ioe) { + fail("getAclStatus failed while in SM"); + } + assertFalse("Could not leave SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 4c02da18d85..8d12c185af0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -26,6 +26,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; @@ -68,6 +73,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; +import org.junit.Assert; import org.junit.Test; import com.google.common.base.Joiner; @@ -580,4 +586,39 @@ public class TestPBHelper { assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C), HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C); } + + @Test + public void testAclEntryProto() { + // All fields populated. + AclEntry e1 = new AclEntry.Builder().setName("test") + .setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT) + .setType(AclEntryType.OTHER).build(); + // No name. + AclEntry e2 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER).setPermission(FsAction.ALL).build(); + // No permission, which will default to the 0'th enum element. 
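+ // Round-tripping through the protobuf representation fills the missing
+ // permission in as the first FsAction value (NONE), which is why the expected
+ // entry below is rebuilt from e3 with FsAction.NONE set explicitly.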
+ AclEntry e3 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER).setName("test").build(); + AclEntry[] expected = new AclEntry[] { e1, e2, + new AclEntry.Builder() + .setScope(e3.getScope()) + .setType(e3.getType()) + .setName(e3.getName()) + .setPermission(FsAction.NONE) + .build() }; + AclEntry[] actual = Lists.newArrayList( + PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists + .newArrayList(e1, e2, e3)))).toArray(new AclEntry[0]); + Assert.assertArrayEquals(expected, actual); + } + + @Test + public void testAclStatusProto() { + AclEntry e = new AclEntry.Builder().setName("test") + .setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT) + .setType(AclEntryType.OTHER).build(); + AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e) + .build(); + Assert.assertEquals(s, PBHelper.convert(PBHelper.convert(s))); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java new file mode 100644 index 00000000000..08af1bb9aad --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.junit.Assert.*; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Helper methods useful for writing ACL tests. + */ +public final class AclTestHelpers { + + /** + * Create a new AclEntry with scope, type and permission (no name). + * + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param permission FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + FsAction permission) { + return new AclEntry.Builder() + .setScope(scope) + .setType(type) + .setPermission(permission) + .build(); + } + + /** + * Create a new AclEntry with scope, type, name and permission. 
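+ * This is the overload used for named entries, for example
+ * {@code aclEntry(ACCESS, USER, "foo", ALL)}.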
+ * + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param name String optional ACL entry name + * @param permission FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + String name, FsAction permission) { + return new AclEntry.Builder() + .setScope(scope) + .setType(type) + .setName(name) + .setPermission(permission) + .build(); + } + + /** + * Create a new AclEntry with scope, type and name (no permission). + * + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param name String optional ACL entry name + * @return AclEntry new AclEntry + */ + public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + String name) { + return new AclEntry.Builder() + .setScope(scope) + .setType(type) + .setName(name) + .build(); + } + + /** + * Create a new AclEntry with scope and type (no name or permission). + * + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @return AclEntry new AclEntry + */ + public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type) { + return new AclEntry.Builder() + .setScope(scope) + .setType(type) + .build(); + } + + /** + * Asserts that permission is denied to the given fs/user for the given file. + * + * @param fs FileSystem to check + * @param user UserGroupInformation owner of fs + * @param pathToCheck Path file to check + * @throws Exception if there is an unexpected error + */ + public static void assertFilePermissionDenied(FileSystem fs, + UserGroupInformation user, Path pathToCheck) throws Exception { + try { + DFSTestUtil.readFileBuffer(fs, pathToCheck); + fail("expected AccessControlException for user " + user + ", path = " + + pathToCheck); + } catch (AccessControlException e) { + // expected + } + } + + /** + * Asserts that permission is granted to the given fs/user for the given file. + * + * @param fs FileSystem to check + * @param user UserGroupInformation owner of fs + * @param pathToCheck Path file to check + * @throws Exception if there is an unexpected error + */ + public static void assertFilePermissionGranted(FileSystem fs, + UserGroupInformation user, Path pathToCheck) throws Exception { + try { + DFSTestUtil.readFileBuffer(fs, pathToCheck); + } catch (AccessControlException e) { + fail("expected permission granted for user " + user + ", path = " + + pathToCheck); + } + } + + /** + * Asserts the value of the FsPermission bits on the inode of a specific path. + * + * @param fs FileSystem to use for check + * @param pathToCheck Path inode to check + * @param perm short expected permission bits + * @throws IOException thrown if there is an I/O error + */ + public static void assertPermission(FileSystem fs, Path pathToCheck, + short perm) throws IOException { + assertEquals(perm, fs.getFileStatus(pathToCheck).getPermission().toShort()); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java new file mode 100644 index 00000000000..d3dc844d015 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -0,0 +1,1308 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.junit.Assert.*; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.AclException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +/** + * Tests NameNode interaction for all ACL modification APIs. This test suite + * also covers interaction of setPermission with inodes that have ACLs. 
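+ * <p>
+ * Most tests follow the same pattern (a sketch, using the static imports from
+ * AclTestHelpers and Guava's Lists):
+ * <pre>
+ * fs.setAcl(path, Lists.newArrayList(
+ *     aclEntry(ACCESS, USER, ALL),
+ *     aclEntry(ACCESS, USER, "foo", ALL),
+ *     aclEntry(ACCESS, GROUP, READ_EXECUTE),
+ *     aclEntry(ACCESS, OTHER, NONE)));
+ * AclStatus s = fs.getAclStatus(path);
+ * AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
+ * </pre>
+ * Subclasses are expected to create the shared MiniDFSCluster and assign the
+ * protected static cluster and conf fields before these tests run.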
+ */ +public abstract class FSAclBaseTest { + private static final UserGroupInformation BRUCE = + UserGroupInformation.createUserForTesting("bruce", new String[] { }); + private static final UserGroupInformation DIANA = + UserGroupInformation.createUserForTesting("diana", new String[] { }); + private static final UserGroupInformation SUPERGROUP_MEMBER = + UserGroupInformation.createUserForTesting("super", new String[] { + DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT }); + + protected static MiniDFSCluster cluster; + protected static Configuration conf; + private static int pathCount = 0; + private static Path path; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + private FileSystem fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember; + + @AfterClass + public static void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Before + public void setUp() throws Exception { + pathCount += 1; + path = new Path("/p" + pathCount); + initFileSystems(); + } + + @After + public void destroyFileSystems() { + IOUtils.cleanup(null, fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember); + fs = fsAsBruce = fsAsDiana = fsAsSupergroupMember = null; + } + + @Test + public void testModifyAclEntries() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE)); + fs.modifyAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testModifyAclEntriesOnlyAccess() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo", READ_EXECUTE)); + fs.modifyAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testModifyAclEntriesOnlyDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE)); + fs.modifyAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + 
assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testModifyAclEntriesMinimal() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo", READ_WRITE)); + fs.modifyAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ_WRITE), + aclEntry(ACCESS, GROUP, READ) }, returned); + assertPermission((short)0660); + assertAclFeature(true); + } + + @Test + public void testModifyAclEntriesMinimalDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE)); + fs.modifyAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testModifyAclEntriesCustomMask() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, MASK, NONE)); + fs.modifyAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ) }, returned); + assertPermission((short)0600); + assertAclFeature(true); + } + + @Test + public void testModifyAclEntriesStickyBit() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE)); + fs.modifyAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)01750); + assertAclFeature(true); + } + + @Test(expected=FileNotFoundException.class) + public void testModifyAclEntriesPathNotFound() throws IOException { + // Path has not been created. 
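+ // The call below should fail with FileNotFoundException, as declared on the
+ // @Test annotation above.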
+ List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE)); + fs.modifyAclEntries(path, aclSpec); + } + + @Test(expected=AclException.class) + public void testModifyAclEntriesDefaultOnFile() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.modifyAclEntries(path, aclSpec); + } + + @Test + public void testRemoveAclEntries() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo"), + aclEntry(DEFAULT, USER, "foo")); + fs.removeAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testRemoveAclEntriesOnlyAccess() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, USER, "bar", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_WRITE), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo")); + fs.removeAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bar", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_WRITE) }, returned); + assertPermission((short)0760); + assertAclFeature(true); + } + + @Test + public void testRemoveAclEntriesOnlyDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, USER, "bar", READ_EXECUTE)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo")); + fs.removeAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bar", READ_EXECUTE), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testRemoveAclEntriesMinimal() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0760)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, 
READ_WRITE), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo"), + aclEntry(ACCESS, MASK)); + fs.removeAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)0760); + assertAclFeature(false); + } + + + @Test + public void testRemoveAclEntriesMinimalDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo"), + aclEntry(ACCESS, MASK), + aclEntry(DEFAULT, USER, "foo"), + aclEntry(DEFAULT, MASK)); + fs.removeAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testRemoveAclEntriesStickyBit() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo"), + aclEntry(DEFAULT, USER, "foo")); + fs.removeAclEntries(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)01750); + assertAclFeature(true); + } + + @Test(expected=FileNotFoundException.class) + public void testRemoveAclEntriesPathNotFound() throws IOException { + // Path has not been created. 
+ List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo")); + fs.removeAclEntries(path, aclSpec); + } + + @Test + public void testRemoveDefaultAcl() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + fs.removeDefaultAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertPermission((short)0770); + assertAclFeature(true); + } + + @Test + public void testRemoveDefaultAclOnlyAccess() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + fs.removeDefaultAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertPermission((short)0770); + assertAclFeature(true); + } + + @Test + public void testRemoveDefaultAclOnlyDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + fs.removeDefaultAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)0750); + assertAclFeature(false); + } + + @Test + public void testRemoveDefaultAclMinimal() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + fs.removeDefaultAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)0750); + assertAclFeature(false); + } + + @Test + public void testRemoveDefaultAclStickyBit() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + fs.removeDefaultAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertPermission((short)01770); + assertAclFeature(true); + } + + @Test(expected=FileNotFoundException.class) + public void testRemoveDefaultAclPathNotFound() throws IOException { + // Path has not been created. 
+ fs.removeDefaultAcl(path); + } + + @Test + public void testRemoveAcl() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + fs.removeAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)0750); + assertAclFeature(false); + } + + @Test + public void testRemoveAclMinimalAcl() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + fs.removeAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)0640); + assertAclFeature(false); + } + + @Test + public void testRemoveAclStickyBit() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + fs.removeAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)01750); + assertAclFeature(false); + } + + @Test + public void testRemoveAclOnlyDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + fs.removeAcl(path); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)0750); + assertAclFeature(false); + } + + @Test(expected=FileNotFoundException.class) + public void testRemoveAclPathNotFound() throws IOException { + // Path has not been created. 
+ fs.removeAcl(path); + } + + @Test + public void testSetAcl() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0770); + assertAclFeature(true); + } + + @Test + public void testSetAclOnlyAccess() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ) }, returned); + assertPermission((short)0640); + assertAclFeature(true); + } + + @Test + public void testSetAclOnlyDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testSetAclMinimal() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0644)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)0640); + assertAclFeature(false); + } + + @Test + public void testSetAclMinimalDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE)); + fs.setAcl(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + 
assertPermission((short)0750); + assertAclFeature(true); + } + + @Test + public void testSetAclCustomMask() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ) }, returned); + assertPermission((short)0670); + assertAclFeature(true); + } + + @Test + public void testSetAclStickyBit() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)01770); + assertAclFeature(true); + } + + @Test(expected=FileNotFoundException.class) + public void testSetAclPathNotFound() throws IOException { + // Path has not been created. + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE)); + fs.setAcl(path, aclSpec); + } + + @Test(expected=AclException.class) + public void testSetAclDefaultOnFile() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + } + + @Test + public void testSetPermission() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + fs.setPermission(path, FsPermission.createImmutable((short)0700)); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0700); + assertAclFeature(true); + } + + @Test + public void testSetPermissionOnlyAccess() throws IOException { + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, 
NONE)); + fs.setAcl(path, aclSpec); + fs.setPermission(path, FsPermission.createImmutable((short)0600)); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ), + aclEntry(ACCESS, GROUP, READ) }, returned); + assertPermission((short)0600); + assertAclFeature(true); + } + + @Test + public void testSetPermissionOnlyDefault() throws IOException { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + fs.setPermission(path, FsPermission.createImmutable((short)0700)); + AclStatus s = fs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0700); + assertAclFeature(true); + } + + @Test + public void testDefaultAclNewFile() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + Path filePath = new Path(path, "file1"); + fs.create(filePath).close(); + AclStatus s = fs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertPermission(filePath, (short)0640); + assertAclFeature(filePath, true); + } + + @Test + public void testOnlyAccessAclNewFile() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo", ALL)); + fs.modifyAclEntries(path, aclSpec); + Path filePath = new Path(path, "file1"); + fs.create(filePath).close(); + AclStatus s = fs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission(filePath, (short)0644); + assertAclFeature(filePath, false); + } + + @Test + public void testDefaultMinimalAclNewFile() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE)); + fs.setAcl(path, aclSpec); + Path filePath = new Path(path, "file1"); + fs.create(filePath).close(); + AclStatus s = fs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission(filePath, (short)0640); + assertAclFeature(filePath, false); + } + + @Test + public void testDefaultAclNewDir() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + Path dirPath = new Path(path, "dir1"); + fs.mkdirs(dirPath); + AclStatus s = fs.getAclStatus(dirPath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, 
"foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission(dirPath, (short)0750); + assertAclFeature(dirPath, true); + } + + @Test + public void testOnlyAccessAclNewDir() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "foo", ALL)); + fs.modifyAclEntries(path, aclSpec); + Path dirPath = new Path(path, "dir1"); + fs.mkdirs(dirPath); + AclStatus s = fs.getAclStatus(dirPath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission(dirPath, (short)0755); + assertAclFeature(dirPath, false); + } + + @Test + public void testDefaultMinimalAclNewDir() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE)); + fs.setAcl(path, aclSpec); + Path dirPath = new Path(path, "dir1"); + fs.mkdirs(dirPath); + AclStatus s = fs.getAclStatus(dirPath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission(dirPath, (short)0750); + assertAclFeature(dirPath, true); + } + + @Test + public void testDefaultAclNewFileIntermediate() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + Path dirPath = new Path(path, "dir1"); + Path filePath = new Path(dirPath, "file1"); + fs.create(filePath).close(); + AclEntry[] expected = new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }; + AclStatus s = fs.getAclStatus(dirPath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission(dirPath, (short)0750); + assertAclFeature(dirPath, true); + expected = new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE) }; + s = fs.getAclStatus(filePath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission(filePath, (short)0640); + assertAclFeature(filePath, true); + } + + @Test + public void testDefaultAclNewDirIntermediate() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + Path dirPath = new Path(path, "dir1"); + Path subdirPath = new Path(dirPath, "subdir1"); + fs.mkdirs(subdirPath); + AclEntry[] expected = new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }; + AclStatus s = 
fs.getAclStatus(dirPath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission(dirPath, (short)0750); + assertAclFeature(dirPath, true); + s = fs.getAclStatus(subdirPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission(subdirPath, (short)0750); + assertAclFeature(subdirPath, true); + } + + @Test + public void testDefaultAclNewSymlinkIntermediate() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + Path filePath = new Path(path, "file1"); + fs.create(filePath).close(); + fs.setPermission(filePath, FsPermission.createImmutable((short)0640)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + Path dirPath = new Path(path, "dir1"); + Path linkPath = new Path(dirPath, "link1"); + fs.createSymlink(filePath, linkPath, true); + AclEntry[] expected = new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE) }; + AclStatus s = fs.getAclStatus(dirPath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission(dirPath, (short)0750); + assertAclFeature(dirPath, true); + expected = new AclEntry[] { }; + s = fs.getAclStatus(linkPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission(linkPath, (short)0640); + assertAclFeature(linkPath, false); + s = fs.getAclStatus(filePath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission(filePath, (short)0640); + assertAclFeature(filePath, false); + } + + @Test + public void testDefaultAclNewFileWithMode() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0755)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + Path filePath = new Path(path, "file1"); + int bufferSize = cluster.getConfiguration(0).getInt( + CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, + CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_DEFAULT); + fs.create(filePath, new FsPermission((short)0740), false, bufferSize, + fs.getDefaultReplication(filePath), fs.getDefaultBlockSize(path), null) + .close(); + AclStatus s = fs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertPermission(filePath, (short)0740); + assertAclFeature(filePath, true); + } + + @Test + public void testDefaultAclNewDirWithMode() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0755)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(path, aclSpec); + Path dirPath = new Path(path, "dir1"); + fs.mkdirs(dirPath, new FsPermission((short)0740)); + AclStatus s = fs.getAclStatus(dirPath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, 
READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, READ_EXECUTE) }, returned); + assertPermission(dirPath, (short)0740); + assertAclFeature(dirPath, true); + } + + @Test + public void testSkipAclEnforcementPermsDisabled() throws Exception { + Path bruceDir = new Path(path, "bruce"); + Path bruceFile = new Path(bruceDir, "file"); + fs.mkdirs(bruceDir); + fs.setOwner(bruceDir, "bruce", null); + fsAsBruce.create(bruceFile).close(); + fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList( + aclEntry(ACCESS, USER, "diana", NONE))); + assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile); + try { + conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); + destroyFileSystems(); + restartCluster(); + initFileSystems(); + assertFilePermissionGranted(fsAsDiana, DIANA, bruceFile); + } finally { + conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); + restartCluster(); + } + } + + @Test + public void testSkipAclEnforcementSuper() throws Exception { + Path bruceDir = new Path(path, "bruce"); + Path bruceFile = new Path(bruceDir, "file"); + fs.mkdirs(bruceDir); + fs.setOwner(bruceDir, "bruce", null); + fsAsBruce.create(bruceFile).close(); + fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList( + aclEntry(ACCESS, USER, "diana", NONE))); + assertFilePermissionGranted(fs, DIANA, bruceFile); + assertFilePermissionGranted(fsAsBruce, DIANA, bruceFile); + assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile); + assertFilePermissionGranted(fsAsSupergroupMember, SUPERGROUP_MEMBER, + bruceFile); + } + + @Test + public void testModifyAclEntriesMustBeOwnerOrSuper() throws Exception { + Path bruceDir = new Path(path, "bruce"); + Path bruceFile = new Path(bruceDir, "file"); + fs.mkdirs(bruceDir); + fs.setOwner(bruceDir, "bruce", null); + fsAsBruce.create(bruceFile).close(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "diana", ALL)); + fsAsBruce.modifyAclEntries(bruceFile, aclSpec); + fs.modifyAclEntries(bruceFile, aclSpec); + fsAsSupergroupMember.modifyAclEntries(bruceFile, aclSpec); + exception.expect(AccessControlException.class); + fsAsDiana.modifyAclEntries(bruceFile, aclSpec); + } + + @Test + public void testRemoveAclEntriesMustBeOwnerOrSuper() throws Exception { + Path bruceDir = new Path(path, "bruce"); + Path bruceFile = new Path(bruceDir, "file"); + fs.mkdirs(bruceDir); + fs.setOwner(bruceDir, "bruce", null); + fsAsBruce.create(bruceFile).close(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "diana")); + fsAsBruce.removeAclEntries(bruceFile, aclSpec); + fs.removeAclEntries(bruceFile, aclSpec); + fsAsSupergroupMember.removeAclEntries(bruceFile, aclSpec); + exception.expect(AccessControlException.class); + fsAsDiana.removeAclEntries(bruceFile, aclSpec); + } + + @Test + public void testRemoveDefaultAclMustBeOwnerOrSuper() throws Exception { + Path bruceDir = new Path(path, "bruce"); + Path bruceFile = new Path(bruceDir, "file"); + fs.mkdirs(bruceDir); + fs.setOwner(bruceDir, "bruce", null); + fsAsBruce.create(bruceFile).close(); + fsAsBruce.removeDefaultAcl(bruceFile); + fs.removeDefaultAcl(bruceFile); + fsAsSupergroupMember.removeDefaultAcl(bruceFile); + exception.expect(AccessControlException.class); + fsAsDiana.removeDefaultAcl(bruceFile); + } + + @Test + public void testRemoveAclMustBeOwnerOrSuper() throws Exception { + Path bruceDir = new Path(path, "bruce"); + Path bruceFile = new Path(bruceDir, "file"); + fs.mkdirs(bruceDir); + fs.setOwner(bruceDir, "bruce", null); + fsAsBruce.create(bruceFile).close(); + 
fsAsBruce.removeAcl(bruceFile); + fs.removeAcl(bruceFile); + fsAsSupergroupMember.removeAcl(bruceFile); + exception.expect(AccessControlException.class); + fsAsDiana.removeAcl(bruceFile); + } + + @Test + public void testSetAclMustBeOwnerOrSuper() throws Exception { + Path bruceDir = new Path(path, "bruce"); + Path bruceFile = new Path(bruceDir, "file"); + fs.mkdirs(bruceDir); + fs.setOwner(bruceDir, "bruce", null); + fsAsBruce.create(bruceFile).close(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "diana", READ_WRITE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, READ)); + fsAsBruce.setAcl(bruceFile, aclSpec); + fs.setAcl(bruceFile, aclSpec); + fsAsSupergroupMember.setAcl(bruceFile, aclSpec); + exception.expect(AccessControlException.class); + fsAsDiana.setAcl(bruceFile, aclSpec); + } + + @Test + public void testGetAclStatusRequiresTraverseOrSuper() throws Exception { + Path bruceDir = new Path(path, "bruce"); + Path bruceFile = new Path(bruceDir, "file"); + fs.mkdirs(bruceDir); + fs.setOwner(bruceDir, "bruce", null); + fsAsBruce.create(bruceFile).close(); + fsAsBruce.setAcl(bruceDir, Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "diana", READ), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE))); + fsAsBruce.getAclStatus(bruceFile); + fs.getAclStatus(bruceFile); + fsAsSupergroupMember.getAclStatus(bruceFile); + exception.expect(AccessControlException.class); + fsAsDiana.getAclStatus(bruceFile); + } + + /** + * Creates a FileSystem for the super-user. + * + * @return FileSystem for super-user + * @throws Exception if creation fails + */ + protected FileSystem createFileSystem() throws Exception { + return cluster.getFileSystem(); + } + + /** + * Creates a FileSystem for a specific user. + * + * @param user UserGroupInformation specific user + * @return FileSystem for specific user + * @throws Exception if creation fails + */ + protected FileSystem createFileSystem(UserGroupInformation user) + throws Exception { + return DFSTestUtil.getFileSystemAs(user, cluster.getConfiguration(0)); + } + + /** + * Initializes all FileSystem instances used in the tests. + * + * @throws Exception if initialization fails + */ + private void initFileSystems() throws Exception { + fs = createFileSystem(); + fsAsBruce = createFileSystem(BRUCE); + fsAsDiana = createFileSystem(DIANA); + fsAsSupergroupMember = createFileSystem(SUPERGROUP_MEMBER); + } + + /** + * Restarts the cluster without formatting, so all data is preserved. + * + * @throws Exception if restart fails + */ + private void restartCluster() throws Exception { + shutdown(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false) + .build(); + cluster.waitActive(); + } + + /** + * Asserts whether or not the inode for the test path has an AclFeature. + * + * @param expectAclFeature boolean true if an AclFeature must be present, + * false if an AclFeature must not be present + * @throws IOException thrown if there is an I/O error + */ + private static void assertAclFeature(boolean expectAclFeature) + throws IOException { + assertAclFeature(path, expectAclFeature); + } + + /** + * Asserts whether or not the inode for a specific path has an AclFeature. 
+ * + * @param pathToCheck Path inode to check + * @param expectAclFeature boolean true if an AclFeature must be present, + * false if an AclFeature must not be present + * @throws IOException thrown if there is an I/O error + */ + private static void assertAclFeature(Path pathToCheck, + boolean expectAclFeature) throws IOException { + INode inode = cluster.getNamesystem().getFSDirectory().getRoot() + .getNode(pathToCheck.toUri().getPath(), false); + assertNotNull(inode); + AclFeature aclFeature = inode.getAclFeature(); + if (expectAclFeature) { + assertNotNull(aclFeature); + // Intentionally capturing a reference to the entries, not using nested + // calls. This way, we get compile-time enforcement that the entries are + // stored in an ImmutableList. + ImmutableList entries = aclFeature.getEntries(); + assertNotNull(entries); + assertFalse(entries.isEmpty()); + } else { + assertNull(aclFeature); + } + } + + /** + * Asserts the value of the FsPermission bits on the inode of the test path. + * + * @param perm short expected permission bits + * @throws IOException thrown if there is an I/O error + */ + private void assertPermission(short perm) throws IOException { + assertPermission(path, perm); + } + + /** + * Asserts the value of the FsPermission bits on the inode of a specific path. + * + * @param pathToCheck Path inode to check + * @param perm short expected permission bits + * @throws IOException thrown if there is an I/O error + */ + private void assertPermission(Path pathToCheck, short perm) + throws IOException { + AclTestHelpers.assertPermission(fs, pathToCheck, perm); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java index cc42e203ea6..e6742047ba3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java @@ -96,6 +96,7 @@ public class OfflineEditsViewerHelper { "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT"); config.setBoolean( DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); + config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); cluster = new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build(); cluster.waitClusterUp(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java new file mode 100644 index 00000000000..afb7dc528ce --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java @@ -0,0 +1,189 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.AclException;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.After;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Tests that the configuration flag that controls support for ACLs is off by
+ * default and causes all attempted operations related to ACLs to fail.  The
+ * NameNode can still load ACLs from fsimage or edits.
+ */
+public class TestAclConfigFlag {
+  private static final Path PATH = new Path("/path");
+
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  @After
+  public void shutdown() throws Exception {
+    IOUtils.cleanup(null, fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testModifyAclEntries() throws Exception {
+    initCluster(true, false);
+    fs.mkdirs(PATH);
+    expectException();
+    fs.modifyAclEntries(PATH, Lists.newArrayList(
+      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
+  }
+
+  @Test
+  public void testRemoveAclEntries() throws Exception {
+    initCluster(true, false);
+    fs.mkdirs(PATH);
+    expectException();
+    fs.removeAclEntries(PATH, Lists.newArrayList(
+      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
+  }
+
+  @Test
+  public void testRemoveDefaultAcl() throws Exception {
+    initCluster(true, false);
+    fs.mkdirs(PATH);
+    expectException();
+    fs.removeDefaultAcl(PATH);
+  }
+
+  @Test
+  public void testRemoveAcl() throws Exception {
+    initCluster(true, false);
+    fs.mkdirs(PATH);
+    expectException();
+    fs.removeAcl(PATH);
+  }
+
+  @Test
+  public void testSetAcl() throws Exception {
+    initCluster(true, false);
+    fs.mkdirs(PATH);
+    expectException();
+    fs.setAcl(PATH, Lists.newArrayList(
+      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
+  }
+
+  @Test
+  public void testGetAclStatus() throws Exception {
+    initCluster(true, false);
+    fs.mkdirs(PATH);
+    expectException();
+    fs.getAclStatus(PATH);
+  }
+
+  @Test
+  public void testEditLog() throws Exception {
+    // With ACLs enabled, set an ACL.
+    initCluster(true, true);
+    fs.mkdirs(PATH);
+    fs.setAcl(PATH, Lists.newArrayList(
+      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
+
+    // Restart with ACLs disabled.  Expect successful restart.
+    restart(false, false);
+  }
+
+  @Test
+  public void testFsImage() throws Exception {
+    // With ACLs enabled, set an ACL.
+    initCluster(true, true);
+    fs.mkdirs(PATH);
+    fs.setAcl(PATH, Lists.newArrayList(
+      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
+
+    // Save a new checkpoint and restart with ACLs still enabled.
+    restart(true, true);
+
+    // Restart with ACLs disabled.  Expect successful restart.
+    restart(false, false);
+  }
+
+  /**
+   * We expect an AclException, and we want the exception text to state the
+   * configuration key that controls ACL support.
+   */
+  private void expectException() {
+    exception.expect(AclException.class);
+    exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY);
+  }
+
+  /**
+   * Initialize the cluster, wait for it to become active, and get FileSystem.
+   *
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param aclsEnabled if true, ACL support is enabled
+   * @throws Exception if any step fails
+   */
+  private void initCluster(boolean format, boolean aclsEnabled)
+      throws Exception {
+    Configuration conf = new Configuration();
+    // not explicitly setting to false, should be false by default
+    if (aclsEnabled) {
+      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    }
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
+      .build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+  }
+
+  /**
+   * Restart the cluster, optionally saving a new checkpoint.
+   *
+   * @param checkpoint boolean true to save a new checkpoint
+   * @param aclsEnabled if true, ACL support is enabled
+   * @throws Exception if restart fails
+   */
+  private void restart(boolean checkpoint, boolean aclsEnabled)
+      throws Exception {
+    NameNode nameNode = cluster.getNameNode();
+    if (checkpoint) {
+      NameNodeAdapter.enterSafeMode(nameNode, false);
+      NameNodeAdapter.saveNamespace(nameNode);
+    }
+    shutdown();
+    initCluster(false, aclsEnabled);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
new file mode 100644
index 00000000000..b646c672a01
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
@@ -0,0 +1,1208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; +import static org.apache.hadoop.hdfs.server.namenode.AclTransformation.*; +import static org.junit.Assert.*; + +import java.util.List; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import org.junit.Test; + +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.protocol.AclException; +import org.apache.hadoop.hdfs.server.namenode.AclTransformation; + +/** + * Tests operations that modify ACLs. All tests in this suite have been + * cross-validated against Linux setfacl/getfacl to check for consistency of the + * HDFS implementation. + */ +public class TestAclTransformation { + + private static final List ACL_SPEC_TOO_LARGE; + static { + ACL_SPEC_TOO_LARGE = Lists.newArrayListWithCapacity(33); + for (int i = 0; i < 33; ++i) { + ACL_SPEC_TOO_LARGE.add(aclEntry(ACCESS, USER, "user" + i, ALL)); + } + } + + @Test + public void testFilterAclEntriesByAclSpec() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE)) + .add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, GROUP, "sales", READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, "execs", READ_WRITE)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "diana"), + aclEntry(ACCESS, GROUP, "sales")); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, GROUP, "execs", READ_WRITE)) + .add(aclEntry(ACCESS, MASK, READ_WRITE)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecUnchanged() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", ALL)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, "sales", ALL)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "clark"), + aclEntry(ACCESS, GROUP, "execs")); + assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecAccessMaskCalculated() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ_WRITE)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "diana")); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + 
.add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecDefaultMaskCalculated() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, USER, "diana", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "diana")); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecDefaultMaskPreserved() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ_WRITE)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "diana", ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "diana")); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "diana", ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecAccessMaskPreserved() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, USER, "diana", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "diana")); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, 
USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecAutomaticDefaultUser() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, READ_WRITE)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecAutomaticDefaultGroup() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, GROUP)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecAutomaticDefaultOther() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, OTHER)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, READ)) + .build(); + assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec)); + } + + @Test + public void testFilterAclEntriesByAclSpecEmptyAclSpec() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList(); + assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec)); + } 
+ + @Test(expected=AclException.class) + public void testFilterAclEntriesByAclSpecRemoveAccessMaskRequired() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, MASK)); + filterAclEntriesByAclSpec(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testFilterAclEntriesByAclSpecRemoveDefaultMaskRequired() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, MASK)); + filterAclEntriesByAclSpec(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testFilterAclEntriesByAclSpecInputTooLarge() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + filterAclEntriesByAclSpec(existing, ACL_SPEC_TOO_LARGE); + } + + @Test + public void testFilterDefaultAclEntries() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, "sales", READ_EXECUTE)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, GROUP, "sales", READ_EXECUTE)) + .add(aclEntry(DEFAULT, MASK, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, READ_EXECUTE)) + .build(); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, "sales", READ_EXECUTE)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + assertEquals(expected, filterDefaultAclEntries(existing)); + } + + @Test + public void testFilterDefaultAclEntriesUnchanged() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", ALL)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, "sales", ALL)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + assertEquals(existing, filterDefaultAclEntries(existing)); + } + + @Test + public void testMergeAclEntries() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", ALL)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", ALL)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, 
NONE)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesUnchanged() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", ALL)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, "sales", ALL)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", ALL)) + .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE)) + .add(aclEntry(DEFAULT, GROUP, "sales", ALL)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bruce", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "sales", ALL), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE)); + assertEquals(existing, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesMultipleNewBeforeExisting() + throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "diana", READ)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, MASK, READ_EXECUTE)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, USER, "clark", READ_EXECUTE), + aclEntry(ACCESS, USER, "diana", READ_EXECUTE)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE)) + .add(aclEntry(ACCESS, USER, "clark", READ_EXECUTE)) + .add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, MASK, READ_EXECUTE)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesAccessMaskCalculated() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, USER, "diana", READ)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE)) + .add(aclEntry(ACCESS, USER, "diana", READ)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ_EXECUTE)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesDefaultMaskCalculated() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, 
NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "bruce", READ_WRITE), + aclEntry(DEFAULT, USER, "diana", READ_EXECUTE)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE)) + .add(aclEntry(DEFAULT, USER, "diana", READ_EXECUTE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesDefaultMaskPreserved() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "diana", ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "diana", FsAction.READ_EXECUTE)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ_EXECUTE)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "diana", ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesAccessMaskPreserved() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, USER, "diana", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "diana", READ_EXECUTE)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, USER, "diana", READ_EXECUTE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ_EXECUTE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesAutomaticDefaultUser() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, READ)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, 
USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE)) + .add(aclEntry(DEFAULT, OTHER, READ)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesAutomaticDefaultGroup() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, READ)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, READ_EXECUTE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, OTHER, READ)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesAutomaticDefaultOther() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, READ_EXECUTE), + aclEntry(DEFAULT, GROUP, READ_EXECUTE)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, READ_EXECUTE)) + .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesProvidedAccessMask() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, MASK, ALL)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesProvidedDefaultMask() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, mergeAclEntries(existing, aclSpec)); + } + + @Test + public void testMergeAclEntriesEmptyAclSpec() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + 
.add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList(); + assertEquals(existing, mergeAclEntries(existing, aclSpec)); + } + + @Test(expected=AclException.class) + public void testMergeAclEntriesInputTooLarge() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + mergeAclEntries(existing, ACL_SPEC_TOO_LARGE); + } + + @Test(expected=AclException.class) + public void testMergeAclEntriesResultTooLarge() throws AclException { + ImmutableList.Builder aclBuilder = + new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)); + for (int i = 1; i <= 28; ++i) { + aclBuilder.add(aclEntry(ACCESS, USER, "user" + i, READ)); + } + aclBuilder + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)); + List existing = aclBuilder.build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ)); + mergeAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testMergeAclEntriesDuplicateEntries() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", ALL), + aclEntry(ACCESS, USER, "diana", READ_WRITE), + aclEntry(ACCESS, USER, "clark", READ), + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE)); + mergeAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testMergeAclEntriesNamedMask() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, MASK, "bruce", READ_EXECUTE)); + mergeAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testMergeAclEntriesNamedOther() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE)); + mergeAclEntries(existing, aclSpec); + } + + @Test + public void testReplaceAclEntries() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", ALL)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bruce", READ_WRITE), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "sales", ALL), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE)); + List expected = new ImmutableList.Builder() + 
.add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, "sales", ALL)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE)) + .add(aclEntry(DEFAULT, GROUP, "sales", ALL)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesUnchanged() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", ALL)) + .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) + .add(aclEntry(ACCESS, GROUP, "sales", ALL)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", ALL)) + .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE)) + .add(aclEntry(DEFAULT, GROUP, "sales", ALL)) + .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bruce", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "sales", ALL), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, NONE)); + assertEquals(existing, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesAccessMaskCalculated() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ), + aclEntry(ACCESS, USER, "diana", READ_WRITE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, READ)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ_WRITE)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesDefaultMaskCalculated() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, READ), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bruce", READ), + aclEntry(DEFAULT, USER, "diana", READ_WRITE), + aclEntry(DEFAULT, GROUP, ALL), + aclEntry(DEFAULT, OTHER, READ)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, USER, "diana", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, ALL)) 
+ .add(aclEntry(DEFAULT, MASK, ALL)) + .add(aclEntry(DEFAULT, OTHER, READ)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesDefaultMaskPreserved() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ_WRITE)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "diana", ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ), + aclEntry(ACCESS, USER, "diana", READ_WRITE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, READ)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, ALL)) + .add(aclEntry(ACCESS, MASK, ALL)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "diana", ALL)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesAccessMaskPreserved() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, USER, "diana", READ_WRITE)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bruce", READ), + aclEntry(DEFAULT, GROUP, READ), + aclEntry(DEFAULT, OTHER, NONE)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, USER, "bruce", READ)) + .add(aclEntry(ACCESS, USER, "diana", READ_WRITE)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, MASK, READ)) + .add(aclEntry(ACCESS, OTHER, READ)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesAutomaticDefaultUser() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, "bruce", READ), + aclEntry(DEFAULT, GROUP, READ_WRITE), + aclEntry(DEFAULT, MASK, READ_WRITE), + aclEntry(DEFAULT, OTHER, READ)); + List expected = new ImmutableList.Builder() + 
.add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ_WRITE)) + .add(aclEntry(DEFAULT, MASK, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, READ)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesAutomaticDefaultGroup() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, READ_WRITE), + aclEntry(DEFAULT, USER, "bruce", READ), + aclEntry(DEFAULT, MASK, READ), + aclEntry(DEFAULT, OTHER, READ)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, READ_WRITE)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, READ)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesAutomaticDefaultOther() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(DEFAULT, USER, READ_WRITE), + aclEntry(DEFAULT, USER, "bruce", READ), + aclEntry(DEFAULT, GROUP, READ_WRITE), + aclEntry(DEFAULT, MASK, READ_WRITE)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, READ_WRITE)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ_WRITE)) + .add(aclEntry(DEFAULT, MASK, READ_WRITE)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test + public void testReplaceAclEntriesOnlyDefaults() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "bruce", READ)); + List expected = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .add(aclEntry(DEFAULT, USER, ALL)) + .add(aclEntry(DEFAULT, USER, "bruce", READ)) + .add(aclEntry(DEFAULT, GROUP, READ)) + .add(aclEntry(DEFAULT, MASK, READ)) + .add(aclEntry(DEFAULT, OTHER, NONE)) + .build(); + assertEquals(expected, replaceAclEntries(existing, aclSpec)); + } + + @Test(expected=AclException.class) + public void testReplaceAclEntriesInputTooLarge() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + replaceAclEntries(existing, ACL_SPEC_TOO_LARGE); + } + + @Test(expected=AclException.class) + 
public void testReplaceAclEntriesResultTooLarge() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayListWithCapacity(32); + aclSpec.add(aclEntry(ACCESS, USER, ALL)); + for (int i = 1; i <= 29; ++i) { + aclSpec.add(aclEntry(ACCESS, USER, "user" + i, READ)); + } + aclSpec.add(aclEntry(ACCESS, GROUP, READ)); + aclSpec.add(aclEntry(ACCESS, OTHER, NONE)); + // The ACL spec now has 32 entries. Automatic mask calculation will push it + // over the limit to 33. + replaceAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testReplaceAclEntriesDuplicateEntries() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", ALL), + aclEntry(ACCESS, USER, "diana", READ_WRITE), + aclEntry(ACCESS, USER, "clark", READ), + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE)); + replaceAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testReplaceAclEntriesNamedMask() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(ACCESS, MASK, "bruce", READ_EXECUTE)); + replaceAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testReplaceAclEntriesNamedOther() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, NONE), + aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE)); + replaceAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testReplaceAclEntriesMissingUser() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, NONE)); + replaceAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testReplaceAclEntriesMissingGroup() throws AclException { + List existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, NONE)); + replaceAclEntries(existing, aclSpec); + } + + @Test(expected=AclException.class) + public void testReplaceAclEntriesMissingOther() throws AclException { + List 
existing = new ImmutableList.Builder() + .add(aclEntry(ACCESS, USER, ALL)) + .add(aclEntry(ACCESS, GROUP, READ)) + .add(aclEntry(ACCESS, OTHER, NONE)) + .build(); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, GROUP, "sales", ALL), + aclEntry(ACCESS, MASK, ALL)); + replaceAclEntries(existing, aclSpec); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java new file mode 100644 index 00000000000..03889ea079e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java @@ -0,0 +1,227 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Lists; + +public class TestFSImageWithAcl { + private static Configuration conf; + private static MiniDFSCluster cluster; + + @BeforeClass + public static void setUp() throws IOException { + conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + } + + @AfterClass + public static void tearDown() { + cluster.shutdown(); + } + + private void testAcl(boolean persistNamespace) throws IOException { + Path p = new Path("/p"); + DistributedFileSystem fs = cluster.getFileSystem(); + fs.create(p).close(); + fs.mkdirs(new Path("/23")); + + AclEntry e = new AclEntry.Builder().setName("foo") + .setPermission(READ_EXECUTE).setScope(ACCESS).setType(USER).build(); + fs.modifyAclEntries(p, Lists.newArrayList(e)); + + restart(fs, persistNamespace); + + AclStatus s = 
cluster.getNamesystem().getAclStatus(p.toString()); + AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray( + new AclEntry[0]); + Assert.assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ) }, returned); + + fs.removeAcl(p); + + if (persistNamespace) { + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + fs.saveNamespace(); + fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + } + + cluster.restartNameNode(); + cluster.waitActive(); + + s = cluster.getNamesystem().getAclStatus(p.toString()); + returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]); + Assert.assertArrayEquals(new AclEntry[] { }, returned); + + fs.modifyAclEntries(p, Lists.newArrayList(e)); + s = cluster.getNamesystem().getAclStatus(p.toString()); + returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]); + Assert.assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "foo", READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ) }, returned); + } + + @Test + public void testPersistAcl() throws IOException { + testAcl(true); + } + + @Test + public void testAclEditLog() throws IOException { + testAcl(false); + } + + private void doTestDefaultAclNewChildren(boolean persistNamespace) + throws IOException { + Path dirPath = new Path("/dir"); + Path filePath = new Path(dirPath, "file1"); + Path subdirPath = new Path(dirPath, "subdir1"); + DistributedFileSystem fs = cluster.getFileSystem(); + fs.mkdirs(dirPath); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "foo", ALL)); + fs.setAcl(dirPath, aclSpec); + + fs.create(filePath).close(); + fs.mkdirs(subdirPath); + + AclEntry[] fileExpected = new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE) }; + AclEntry[] subdirExpected = new AclEntry[] { + aclEntry(ACCESS, USER, "foo", ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "foo", ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), + aclEntry(DEFAULT, OTHER, READ_EXECUTE) }; + + AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(fileExpected, fileReturned); + AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertPermission(fs, subdirPath, (short)0755); + + restart(fs, persistNamespace); + + fileReturned = fs.getAclStatus(filePath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(fileExpected, fileReturned); + subdirReturned = fs.getAclStatus(subdirPath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertPermission(fs, subdirPath, (short)0755); + + aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)); + fs.modifyAclEntries(dirPath, aclSpec); + + fileReturned = fs.getAclStatus(filePath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(fileExpected, fileReturned); + subdirReturned = fs.getAclStatus(subdirPath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertPermission(fs, subdirPath, (short)0755); + + restart(fs, persistNamespace); + + fileReturned = fs.getAclStatus(filePath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(fileExpected, fileReturned); + subdirReturned = fs.getAclStatus(subdirPath).getEntries() + .toArray(new 
AclEntry[0]); + Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertPermission(fs, subdirPath, (short)0755); + + fs.removeAcl(dirPath); + + fileReturned = fs.getAclStatus(filePath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(fileExpected, fileReturned); + subdirReturned = fs.getAclStatus(subdirPath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertPermission(fs, subdirPath, (short)0755); + + restart(fs, persistNamespace); + + fileReturned = fs.getAclStatus(filePath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(fileExpected, fileReturned); + subdirReturned = fs.getAclStatus(subdirPath).getEntries() + .toArray(new AclEntry[0]); + Assert.assertArrayEquals(subdirExpected, subdirReturned); + assertPermission(fs, subdirPath, (short)0755); + } + + @Test + public void testFsImageDefaultAclNewChildren() throws IOException { + doTestDefaultAclNewChildren(true); + } + + @Test + public void testEditLogDefaultAclNewChildren() throws IOException { + doTestDefaultAclNewChildren(false); + } + + /** + * Restart the NameNode, optionally saving a new checkpoint. + * + * @param fs DistributedFileSystem used for saving namespace + * @param persistNamespace boolean true to save a new checkpoint + * @throws IOException if restart fails + */ + private void restart(DistributedFileSystem fs, boolean persistNamespace) + throws IOException { + if (persistNamespace) { + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + fs.saveNamespace(); + fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + } + + cluster.restartNameNode(); + cluster.waitActive(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java new file mode 100644 index 00000000000..fd812794623 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java @@ -0,0 +1,417 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; +import static org.junit.Assert.*; + +import java.io.IOException; +import java.util.Arrays; + +import org.junit.Before; +import org.junit.Test; + +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Unit tests covering FSPermissionChecker. All tests in this suite have been + * cross-validated against Linux setfacl/getfacl to check for consistency of the + * HDFS implementation. + */ +public class TestFSPermissionChecker { + private static final long PREFERRED_BLOCK_SIZE = 128 * 1024 * 1024; + private static final short REPLICATION = 3; + private static final String SUPERGROUP = "supergroup"; + private static final String SUPERUSER = "superuser"; + private static final UserGroupInformation BRUCE = + UserGroupInformation.createUserForTesting("bruce", new String[] { }); + private static final UserGroupInformation DIANA = + UserGroupInformation.createUserForTesting("diana", new String[] { "sales" }); + private static final UserGroupInformation CLARK = + UserGroupInformation.createUserForTesting("clark", new String[] { "execs" }); + + private INodeDirectory inodeRoot; + + @Before + public void setUp() { + PermissionStatus permStatus = PermissionStatus.createImmutable(SUPERUSER, + SUPERGROUP, FsPermission.createImmutable((short)0755)); + inodeRoot = new INodeDirectory(INodeId.ROOT_INODE_ID, + INodeDirectory.ROOT_NAME, permStatus, 0L); + } + + @Test + public void testAclOwner() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs", + (short)0640); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, MASK, READ), + aclEntry(ACCESS, OTHER, NONE)); + assertPermissionGranted(BRUCE, "/file1", READ); + assertPermissionGranted(BRUCE, "/file1", WRITE); + assertPermissionGranted(BRUCE, "/file1", READ_WRITE); + assertPermissionDenied(BRUCE, "/file1", EXECUTE); + assertPermissionDenied(DIANA, "/file1", READ); + assertPermissionDenied(DIANA, "/file1", WRITE); + assertPermissionDenied(DIANA, "/file1", EXECUTE); + } + + @Test + public void testAclNamedUser() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs", + (short)0640); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "diana", READ), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, MASK, READ), + aclEntry(ACCESS, OTHER, NONE)); + assertPermissionGranted(DIANA, "/file1", READ); + assertPermissionDenied(DIANA, "/file1", WRITE); + assertPermissionDenied(DIANA, "/file1", EXECUTE); + assertPermissionDenied(DIANA, "/file1", READ_WRITE); + assertPermissionDenied(DIANA, "/file1", READ_EXECUTE); + assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE); + assertPermissionDenied(DIANA, "/file1", ALL); + } + + @Test + public 
void testAclNamedUserDeny() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs", + (short)0644); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "diana", NONE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, MASK, READ), + aclEntry(ACCESS, OTHER, READ)); + assertPermissionGranted(BRUCE, "/file1", READ_WRITE); + assertPermissionGranted(CLARK, "/file1", READ); + assertPermissionDenied(DIANA, "/file1", READ); + } + + @Test + public void testAclNamedUserTraverseDeny() throws IOException { + INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce", + "execs", (short)0755); + INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs", + (short)0644); + addAcl(inodeDir, + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "diana", NONE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, MASK, READ_EXECUTE), + aclEntry(ACCESS, OTHER, READ_EXECUTE)); + assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE); + assertPermissionGranted(CLARK, "/dir1/file1", READ); + assertPermissionDenied(DIANA, "/dir1/file1", READ); + assertPermissionDenied(DIANA, "/dir1/file1", WRITE); + assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE); + assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE); + assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE); + assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE); + assertPermissionDenied(DIANA, "/dir1/file1", ALL); + } + + @Test + public void testAclNamedUserMask() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs", + (short)0620); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, "diana", READ), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, MASK, WRITE), + aclEntry(ACCESS, OTHER, NONE)); + assertPermissionDenied(DIANA, "/file1", READ); + assertPermissionDenied(DIANA, "/file1", WRITE); + assertPermissionDenied(DIANA, "/file1", EXECUTE); + assertPermissionDenied(DIANA, "/file1", READ_WRITE); + assertPermissionDenied(DIANA, "/file1", READ_EXECUTE); + assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE); + assertPermissionDenied(DIANA, "/file1", ALL); + } + + @Test + public void testAclGroup() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs", + (short)0640); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, MASK, READ), + aclEntry(ACCESS, OTHER, NONE)); + assertPermissionGranted(CLARK, "/file1", READ); + assertPermissionDenied(CLARK, "/file1", WRITE); + assertPermissionDenied(CLARK, "/file1", EXECUTE); + assertPermissionDenied(CLARK, "/file1", READ_WRITE); + assertPermissionDenied(CLARK, "/file1", READ_EXECUTE); + assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE); + assertPermissionDenied(CLARK, "/file1", ALL); + } + + @Test + public void testAclGroupDeny() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales", + (short)0604); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, MASK, NONE), + aclEntry(ACCESS, OTHER, READ)); + assertPermissionGranted(BRUCE, "/file1", READ_WRITE); + assertPermissionGranted(CLARK, "/file1", READ); + assertPermissionDenied(DIANA, "/file1", READ); + assertPermissionDenied(DIANA, "/file1", WRITE); + assertPermissionDenied(DIANA, "/file1", EXECUTE); + assertPermissionDenied(DIANA, "/file1", READ_WRITE); 
+ assertPermissionDenied(DIANA, "/file1", READ_EXECUTE); + assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE); + assertPermissionDenied(DIANA, "/file1", ALL); + } + + @Test + public void testAclGroupTraverseDeny() throws IOException { + INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce", + "execs", (short)0755); + INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs", + (short)0644); + addAcl(inodeDir, + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, MASK, NONE), + aclEntry(ACCESS, OTHER, READ_EXECUTE)); + assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE); + assertPermissionGranted(DIANA, "/dir1/file1", READ); + assertPermissionDenied(CLARK, "/dir1/file1", READ); + assertPermissionDenied(CLARK, "/dir1/file1", WRITE); + assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE); + assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE); + assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE); + assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE); + assertPermissionDenied(CLARK, "/dir1/file1", ALL); + } + + @Test + public void testAclGroupTraverseDenyOnlyDefaultEntries() throws IOException { + INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce", + "execs", (short)0755); + INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs", + (short)0644); + addAcl(inodeDir, + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, "sales", NONE), + aclEntry(DEFAULT, GROUP, NONE), + aclEntry(DEFAULT, OTHER, READ_EXECUTE)); + assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE); + assertPermissionGranted(DIANA, "/dir1/file1", READ); + assertPermissionDenied(CLARK, "/dir1/file1", READ); + assertPermissionDenied(CLARK, "/dir1/file1", WRITE); + assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE); + assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE); + assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE); + assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE); + assertPermissionDenied(CLARK, "/dir1/file1", ALL); + } + + @Test + public void testAclGroupMask() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs", + (short)0644); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, GROUP, READ_WRITE), + aclEntry(ACCESS, MASK, READ), + aclEntry(ACCESS, OTHER, READ)); + assertPermissionGranted(BRUCE, "/file1", READ_WRITE); + assertPermissionGranted(CLARK, "/file1", READ); + assertPermissionDenied(CLARK, "/file1", WRITE); + assertPermissionDenied(CLARK, "/file1", EXECUTE); + assertPermissionDenied(CLARK, "/file1", READ_WRITE); + assertPermissionDenied(CLARK, "/file1", READ_EXECUTE); + assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE); + assertPermissionDenied(CLARK, "/file1", ALL); + } + + @Test + public void testAclNamedGroup() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs", + (short)0640); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, GROUP, "sales", READ), + aclEntry(ACCESS, MASK, READ), + aclEntry(ACCESS, OTHER, NONE)); + assertPermissionGranted(BRUCE, "/file1", READ_WRITE); + assertPermissionGranted(CLARK, "/file1", READ); + assertPermissionGranted(DIANA, "/file1", READ); + assertPermissionDenied(DIANA, "/file1", WRITE); + assertPermissionDenied(DIANA, 
"/file1", EXECUTE); + assertPermissionDenied(DIANA, "/file1", READ_WRITE); + assertPermissionDenied(DIANA, "/file1", READ_EXECUTE); + assertPermissionDenied(DIANA, "/file1", ALL); + } + + @Test + public void testAclNamedGroupDeny() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales", + (short)0644); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, GROUP, "execs", NONE), + aclEntry(ACCESS, MASK, READ), + aclEntry(ACCESS, OTHER, READ)); + assertPermissionGranted(BRUCE, "/file1", READ_WRITE); + assertPermissionGranted(DIANA, "/file1", READ); + assertPermissionDenied(CLARK, "/file1", READ); + assertPermissionDenied(CLARK, "/file1", WRITE); + assertPermissionDenied(CLARK, "/file1", EXECUTE); + assertPermissionDenied(CLARK, "/file1", READ_WRITE); + assertPermissionDenied(CLARK, "/file1", READ_EXECUTE); + assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE); + assertPermissionDenied(CLARK, "/file1", ALL); + } + + @Test + public void testAclNamedGroupTraverseDeny() throws IOException { + INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce", + "execs", (short)0755); + INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs", + (short)0644); + addAcl(inodeDir, + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(ACCESS, GROUP, "sales", NONE), + aclEntry(ACCESS, MASK, READ_EXECUTE), + aclEntry(ACCESS, OTHER, READ_EXECUTE)); + assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE); + assertPermissionGranted(CLARK, "/dir1/file1", READ); + assertPermissionDenied(DIANA, "/dir1/file1", READ); + assertPermissionDenied(DIANA, "/dir1/file1", WRITE); + assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE); + assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE); + assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE); + assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE); + assertPermissionDenied(DIANA, "/dir1/file1", ALL); + } + + @Test + public void testAclNamedGroupMask() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs", + (short)0644); + addAcl(inodeFile, + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, GROUP, "sales", READ_WRITE), + aclEntry(ACCESS, MASK, READ), + aclEntry(ACCESS, OTHER, READ)); + assertPermissionGranted(BRUCE, "/file1", READ_WRITE); + assertPermissionGranted(CLARK, "/file1", READ); + assertPermissionGranted(DIANA, "/file1", READ); + assertPermissionDenied(DIANA, "/file1", WRITE); + assertPermissionDenied(DIANA, "/file1", EXECUTE); + assertPermissionDenied(DIANA, "/file1", READ_WRITE); + assertPermissionDenied(DIANA, "/file1", READ_EXECUTE); + assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE); + assertPermissionDenied(DIANA, "/file1", ALL); + } + + @Test + public void testAclOther() throws IOException { + INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales", + (short)0774); + addAcl(inodeFile, + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "diana", ALL), + aclEntry(ACCESS, GROUP, READ_WRITE), + aclEntry(ACCESS, MASK, ALL), + aclEntry(ACCESS, OTHER, READ)); + assertPermissionGranted(BRUCE, "/file1", ALL); + assertPermissionGranted(DIANA, "/file1", ALL); + assertPermissionGranted(CLARK, "/file1", READ); + assertPermissionDenied(CLARK, "/file1", WRITE); + assertPermissionDenied(CLARK, "/file1", EXECUTE); + assertPermissionDenied(CLARK, "/file1", READ_WRITE); + 
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE); + assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE); + assertPermissionDenied(CLARK, "/file1", ALL); + } + + private void addAcl(INodeWithAdditionalFields inode, AclEntry... acl) + throws IOException { + AclStorage.updateINodeAcl((INodeWithAdditionalFields)inode, + Arrays.asList(acl), Snapshot.CURRENT_STATE_ID); + } + + private void assertPermissionGranted(UserGroupInformation user, String path, + FsAction access) throws IOException { + new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path, + inodeRoot, false, null, null, access, null, true); + } + + private void assertPermissionDenied(UserGroupInformation user, String path, + FsAction access) throws IOException { + try { + new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path, + inodeRoot, false, null, null, access, null, true); + fail("expected AccessControlException for user " + user + ", path = " + + path + ", access = " + access); + } catch (AccessControlException e) { + // expected + } + } + + private static INodeDirectory createINodeDirectory(INodeDirectory parent, + String name, String owner, String group, short perm) throws IOException { + PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group, + FsPermission.createImmutable(perm)); + INodeDirectory inodeDirectory = new INodeDirectory( + INodeId.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L); + parent.addChild(inodeDirectory); + return inodeDirectory; + } + + private static INodeFile createINodeFile(INodeDirectory parent, String name, + String owner, String group, short perm) throws IOException { + PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group, + FsPermission.createImmutable(perm)); + INodeFile inodeFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID, + name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION, + PREFERRED_BLOCK_SIZE); + parent.addChild(inodeFile); + return inodeFile; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java new file mode 100644 index 00000000000..7fe45594e24 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.BeforeClass; + +/** + * Tests NameNode interaction for all ACL modification APIs.
This test suite + * also covers interaction of setPermission with inodes that have ACLs. + */ +public class TestNameNodeAcl extends FSAclBaseTest { + + @BeforeClass + public static void init() throws Exception { + conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java index 6aa1276036c..fe320d3ba5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java @@ -91,6 +91,7 @@ public class TestNamenodeRetryCache { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); namesystem = cluster.getNamesystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 2f36da11d47..18ddfbc6a99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -125,6 +125,7 @@ public class TestRetryCacheWithHA { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHATopology()) .numDataNodes(DataNodes).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java new file mode 100644 index 00000000000..0c8084183fb --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java @@ -0,0 +1,741 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.snapshot; + +import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.junit.Assert.*; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; +import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; + +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import com.google.common.collect.Lists; + +/** + * Tests interaction of ACLs with snapshots. + */ +public class TestAclWithSnapshot { + private static final UserGroupInformation BRUCE = + UserGroupInformation.createUserForTesting("bruce", new String[] { }); + private static final UserGroupInformation DIANA = + UserGroupInformation.createUserForTesting("diana", new String[] { }); + + private static MiniDFSCluster cluster; + private static Configuration conf; + private static FileSystem fsAsBruce, fsAsDiana; + private static DistributedFileSystem hdfs; + private static int pathCount = 0; + private static Path path, snapshotPath; + private static String snapshotName; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @BeforeClass + public static void init() throws Exception { + conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + initCluster(true); + } + + @AfterClass + public static void shutdown() throws Exception { + IOUtils.cleanup(null, hdfs, fsAsBruce, fsAsDiana); + if (cluster != null) { + cluster.shutdown(); + } + } + + @Before + public void setUp() { + ++pathCount; + path = new Path("/p" + pathCount); + snapshotName = "snapshot" + pathCount; + snapshotPath = new Path(path, new Path(".snapshot", snapshotName)); + } + + @Test + public void testOriginalAclEnforcedForSnapshotRootAfterChange() + throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE)); + hdfs.setAcl(path, aclSpec); + + assertDirPermissionGranted(fsAsBruce, BRUCE, path); + assertDirPermissionDenied(fsAsDiana, DIANA, path); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + 
// Both original and snapshot still have same ACL. + AclStatus s = hdfs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0750, path); + + s = hdfs.getAclStatus(snapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0750, snapshotPath); + + assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath); + assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath); + + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_EXECUTE), + aclEntry(ACCESS, USER, "diana", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE)); + hdfs.setAcl(path, aclSpec); + + // Original has changed, but snapshot still has old ACL. + doSnapshotRootChangeAssertions(path, snapshotPath); + restart(false); + doSnapshotRootChangeAssertions(path, snapshotPath); + restart(true); + doSnapshotRootChangeAssertions(path, snapshotPath); + } + + private static void doSnapshotRootChangeAssertions(Path path, + Path snapshotPath) throws Exception { + AclStatus s = hdfs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "diana", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0550, path); + + s = hdfs.getAclStatus(snapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0750, snapshotPath); + + assertDirPermissionDenied(fsAsBruce, BRUCE, path); + assertDirPermissionGranted(fsAsDiana, DIANA, path); + assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath); + assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath); + } + + @Test + public void testOriginalAclEnforcedForSnapshotContentsAfterChange() + throws Exception { + Path filePath = new Path(path, "file1"); + Path subdirPath = new Path(path, "subdir1"); + Path fileSnapshotPath = new Path(snapshotPath, "file1"); + Path subdirSnapshotPath = new Path(snapshotPath, "subdir1"); + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777)); + FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600)) + .close(); + FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable( + (short)0700)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_EXECUTE), + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE)); + hdfs.setAcl(filePath, aclSpec); + hdfs.setAcl(subdirPath, aclSpec); + + assertFilePermissionGranted(fsAsBruce, BRUCE, filePath); + assertFilePermissionDenied(fsAsDiana, DIANA, filePath); + assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath); + assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + // Both original and snapshot still have same ACL. 
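+ // Note: getAclStatus lists only the extended entries (the named user plus the unnamed group entry); the mask is carried in the group bits of the FsPermission asserted below.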
+ AclEntry[] expected = new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }; + AclStatus s = hdfs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, filePath); + + s = hdfs.getAclStatus(subdirPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, subdirPath); + + s = hdfs.getAclStatus(fileSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, fileSnapshotPath); + assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath); + assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath); + + s = hdfs.getAclStatus(subdirSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, subdirSnapshotPath); + assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath); + assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath); + + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_EXECUTE), + aclEntry(ACCESS, USER, "diana", ALL), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE)); + hdfs.setAcl(filePath, aclSpec); + hdfs.setAcl(subdirPath, aclSpec); + + // Original has changed, but snapshot still has old ACL. + doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath, + subdirSnapshotPath); + restart(false); + doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath, + subdirSnapshotPath); + restart(true); + doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath, + subdirSnapshotPath); + } + + private static void doSnapshotContentsChangeAssertions(Path filePath, + Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath) + throws Exception { + AclEntry[] expected = new AclEntry[] { + aclEntry(ACCESS, USER, "diana", ALL), + aclEntry(ACCESS, GROUP, NONE) }; + AclStatus s = hdfs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0570, filePath); + assertFilePermissionDenied(fsAsBruce, BRUCE, filePath); + assertFilePermissionGranted(fsAsDiana, DIANA, filePath); + + s = hdfs.getAclStatus(subdirPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0570, subdirPath); + assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath); + assertDirPermissionGranted(fsAsDiana, DIANA, subdirPath); + + expected = new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }; + s = hdfs.getAclStatus(fileSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, fileSnapshotPath); + assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath); + assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath); + + s = hdfs.getAclStatus(subdirSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, subdirSnapshotPath); + assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath); + assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath); + } + + @Test + public void testOriginalAclEnforcedForSnapshotRootAfterRemoval() + throws 
Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE)); + hdfs.setAcl(path, aclSpec); + + assertDirPermissionGranted(fsAsBruce, BRUCE, path); + assertDirPermissionDenied(fsAsDiana, DIANA, path); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + // Both original and snapshot still have same ACL. + AclStatus s = hdfs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0750, path); + + s = hdfs.getAclStatus(snapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0750, snapshotPath); + + assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath); + assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath); + + hdfs.removeAcl(path); + + // Original has changed, but snapshot still has old ACL. + doSnapshotRootRemovalAssertions(path, snapshotPath); + restart(false); + doSnapshotRootRemovalAssertions(path, snapshotPath); + restart(true); + doSnapshotRootRemovalAssertions(path, snapshotPath); + } + + private static void doSnapshotRootRemovalAssertions(Path path, + Path snapshotPath) throws Exception { + AclStatus s = hdfs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + assertPermission((short)0700, path); + + s = hdfs.getAclStatus(snapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0750, snapshotPath); + + assertDirPermissionDenied(fsAsBruce, BRUCE, path); + assertDirPermissionDenied(fsAsDiana, DIANA, path); + assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath); + assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath); + } + + @Test + public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval() + throws Exception { + Path filePath = new Path(path, "file1"); + Path subdirPath = new Path(path, "subdir1"); + Path fileSnapshotPath = new Path(snapshotPath, "file1"); + Path subdirSnapshotPath = new Path(snapshotPath, "subdir1"); + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777)); + FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600)) + .close(); + FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable( + (short)0700)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, READ_EXECUTE), + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE)); + hdfs.setAcl(filePath, aclSpec); + hdfs.setAcl(subdirPath, aclSpec); + + assertFilePermissionGranted(fsAsBruce, BRUCE, filePath); + assertFilePermissionDenied(fsAsDiana, DIANA, filePath); + assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath); + assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + // Both original and snapshot still have same ACL. 
+ AclEntry[] expected = new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }; + AclStatus s = hdfs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, filePath); + + s = hdfs.getAclStatus(subdirPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, subdirPath); + + s = hdfs.getAclStatus(fileSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, fileSnapshotPath); + assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath); + assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath); + + s = hdfs.getAclStatus(subdirSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, subdirSnapshotPath); + assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath); + assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath); + + hdfs.removeAcl(filePath); + hdfs.removeAcl(subdirPath); + + // Original has changed, but snapshot still has old ACL. + doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath, + subdirSnapshotPath); + restart(false); + doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath, + subdirSnapshotPath); + restart(true); + doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath, + subdirSnapshotPath); + } + + private static void doSnapshotContentsRemovalAssertions(Path filePath, + Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath) + throws Exception { + AclEntry[] expected = new AclEntry[] { }; + AclStatus s = hdfs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0500, filePath); + assertFilePermissionDenied(fsAsBruce, BRUCE, filePath); + assertFilePermissionDenied(fsAsDiana, DIANA, filePath); + + s = hdfs.getAclStatus(subdirPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0500, subdirPath); + assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath); + assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath); + + expected = new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }; + s = hdfs.getAclStatus(fileSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, fileSnapshotPath); + assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath); + assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath); + + s = hdfs.getAclStatus(subdirSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0550, subdirSnapshotPath); + assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath); + assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath); + } + + @Test + public void testModifyReadsCurrentState() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", ALL)); + hdfs.modifyAclEntries(path, aclSpec); + + aclSpec = 
Lists.newArrayList( + aclEntry(ACCESS, USER, "diana", READ_EXECUTE)); + hdfs.modifyAclEntries(path, aclSpec); + + AclEntry[] expected = new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", ALL), + aclEntry(ACCESS, USER, "diana", READ_EXECUTE), + aclEntry(ACCESS, GROUP, NONE) }; + AclStatus s = hdfs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0770, path); + assertDirPermissionGranted(fsAsBruce, BRUCE, path); + assertDirPermissionGranted(fsAsDiana, DIANA, path); + } + + @Test + public void testRemoveReadsCurrentState() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", ALL)); + hdfs.modifyAclEntries(path, aclSpec); + + hdfs.removeAcl(path); + + AclEntry[] expected = new AclEntry[] { }; + AclStatus s = hdfs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(expected, returned); + assertPermission((short)0700, path); + assertDirPermissionDenied(fsAsBruce, BRUCE, path); + assertDirPermissionDenied(fsAsDiana, DIANA, path); + } + + @Test + public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot() + throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE)); + hdfs.modifyAclEntries(path, aclSpec); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + AclStatus s = hdfs.getAclStatus(path); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE), + aclEntry(DEFAULT, GROUP, NONE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0700, path); + + s = hdfs.getAclStatus(snapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE), + aclEntry(DEFAULT, GROUP, NONE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), + aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertPermission((short)0700, snapshotPath); + + assertDirPermissionDenied(fsAsBruce, BRUCE, snapshotPath); + } + + @Test + public void testModifyAclEntriesSnapshotPath() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE)); + exception.expect(SnapshotAccessControlException.class); + hdfs.modifyAclEntries(snapshotPath, aclSpec); + } + + @Test + public void testRemoveAclEntriesSnapshotPath() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "bruce")); + exception.expect(SnapshotAccessControlException.class); + hdfs.removeAclEntries(snapshotPath, aclSpec); + } + + @Test + public void testRemoveDefaultAclSnapshotPath() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + 
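+ // Paths under .snapshot are read-only, so this ACL modification is expected to be rejected.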
exception.expect(SnapshotAccessControlException.class); + hdfs.removeDefaultAcl(snapshotPath); + } + + @Test + public void testRemoveAclSnapshotPath() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + exception.expect(SnapshotAccessControlException.class); + hdfs.removeAcl(snapshotPath); + } + + @Test + public void testSetAclSnapshotPath() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700)); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + List aclSpec = Lists.newArrayList( + aclEntry(DEFAULT, USER, "bruce")); + exception.expect(SnapshotAccessControlException.class); + hdfs.setAcl(snapshotPath, aclSpec); + } + + @Test + public void testChangeAclExceedsQuota() throws Exception { + Path filePath = new Path(path, "file1"); + Path fileSnapshotPath = new Path(snapshotPath, "file1"); + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755)); + hdfs.allowSnapshot(path); + hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET); + FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600)) + .close(); + hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ_WRITE)); + hdfs.modifyAclEntries(filePath, aclSpec); + + hdfs.createSnapshot(path, snapshotName); + + AclStatus s = hdfs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0660, filePath); + + s = hdfs.getAclStatus(fileSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0660, filePath); + + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ)); + exception.expect(NSQuotaExceededException.class); + hdfs.modifyAclEntries(filePath, aclSpec); + } + + @Test + public void testRemoveAclExceedsQuota() throws Exception { + Path filePath = new Path(path, "file1"); + Path fileSnapshotPath = new Path(snapshotPath, "file1"); + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755)); + hdfs.allowSnapshot(path); + hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET); + FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600)) + .close(); + hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600)); + List aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ_WRITE)); + hdfs.modifyAclEntries(filePath, aclSpec); + + hdfs.createSnapshot(path, snapshotName); + + AclStatus s = hdfs.getAclStatus(filePath); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0660, filePath); + + s = hdfs.getAclStatus(fileSnapshotPath); + returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { + aclEntry(ACCESS, USER, "bruce", READ_WRITE), + aclEntry(ACCESS, GROUP, NONE) }, returned); + assertPermission((short)0660, filePath); + + aclSpec = Lists.newArrayList( + aclEntry(ACCESS, USER, "bruce", READ)); + exception.expect(NSQuotaExceededException.class); + 
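+ // With the namespace quota set to 3 and a snapshot in place, this ACL change is expected to push usage over quota and fail with NSQuotaExceededException.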
hdfs.removeAcl(filePath); + } + + @Test + public void testGetAclStatusDotSnapshotPath() throws Exception { + hdfs.mkdirs(path); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + AclStatus s = hdfs.getAclStatus(new Path(path, ".snapshot")); + AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); + assertArrayEquals(new AclEntry[] { }, returned); + } + + /** + * Asserts that permission is denied to the given fs/user for the given + * directory. + * + * @param fs FileSystem to check + * @param user UserGroupInformation owner of fs + * @param pathToCheck Path directory to check + * @throws Exception if there is an unexpected error + */ + private static void assertDirPermissionDenied(FileSystem fs, + UserGroupInformation user, Path pathToCheck) throws Exception { + try { + fs.listStatus(pathToCheck); + fail("expected AccessControlException for user " + user + ", path = " + + pathToCheck); + } catch (AccessControlException e) { + // expected + } + } + + /** + * Asserts that permission is granted to the given fs/user for the given + * directory. + * + * @param fs FileSystem to check + * @param user UserGroupInformation owner of fs + * @param pathToCheck Path directory to check + * @throws Exception if there is an unexpected error + */ + private static void assertDirPermissionGranted(FileSystem fs, + UserGroupInformation user, Path pathToCheck) throws Exception { + try { + fs.listStatus(pathToCheck); + } catch (AccessControlException e) { + fail("expected permission granted for user " + user + ", path = " + + pathToCheck); + } + } + + /** + * Asserts the value of the FsPermission bits on the inode of the test path. + * + * @param perm short expected permission bits + * @param pathToCheck Path to check + * @throws Exception thrown if there is an unexpected error + */ + private static void assertPermission(short perm, Path pathToCheck) + throws Exception { + AclTestHelpers.assertPermission(hdfs, pathToCheck, perm); + } + + /** + * Initialize the cluster, wait for it to become active, and get FileSystem + * instances for our test users. + * + * @param format if true, format the NameNode and DataNodes before starting up + * @throws Exception if any step fails + */ + private static void initCluster(boolean format) throws Exception { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format) + .build(); + cluster.waitActive(); + hdfs = cluster.getFileSystem(); + fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf); + fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf); + } + + /** + * Restart the cluster, optionally saving a new checkpoint. 
+ * + * @param checkpoint boolean true to save a new checkpoint + * @throws Exception if restart fails + */ + private static void restart(boolean checkpoint) throws Exception { + NameNode nameNode = cluster.getNameNode(); + if (checkpoint) { + NameNodeAdapter.enterSafeMode(nameNode, false); + NameNodeAdapter.saveNamespace(nameNode); + } + shutdown(); + initCluster(false); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java index 2705ab5252b..9c6839c99b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java @@ -305,7 +305,8 @@ public class TestDiff { final int i = Diff.search(current, inode.getKey()); Assert.assertTrue(i >= 0); final INodeDirectory oldinode = (INodeDirectory)current.get(i); - final INodeDirectory newinode = new INodeDirectory(oldinode, false, true); + final INodeDirectory newinode = new INodeDirectory(oldinode, false, + oldinode.getFeatures()); newinode.setModificationTime(oldinode.getModificationTime() + 1); current.set(i, newinode); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java index 6a5acab3e41..2bce30f0ca5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java @@ -17,11 +17,19 @@ */ package org.apache.hadoop.hdfs.web; +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; + import java.util.HashMap; +import java.util.List; import java.util.Map; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -32,6 +40,8 @@ import org.junit.Assert; import org.junit.Test; import org.mortbay.util.ajax.JSON; +import com.google.common.collect.Lists; + public class TestJsonUtil { static FileStatus toFileStatus(HdfsFileStatus f, String parent) { return new FileStatus(f.getLen(), f.isDir(), f.getReplication(), @@ -135,6 +145,47 @@ public class TestJsonUtil { response.put("ipAddr", "127.0.0.1"); checkDecodeFailure(response); } + + @Test + public void testToAclStatus() { + String jsonString = + "{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}"; + Map json = (Map) JSON.parse(jsonString); + + List aclSpec = + Lists.newArrayList(aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, "user1", READ_WRITE), + aclEntry(ACCESS, GROUP, READ_WRITE), + aclEntry(ACCESS, OTHER, READ_EXECUTE)); + + AclStatus.Builder aclStatusBuilder = new AclStatus.Builder(); + aclStatusBuilder.owner("testuser"); + aclStatusBuilder.group("supergroup"); + aclStatusBuilder.addEntries(aclSpec); + aclStatusBuilder.stickyBit(false); + + Assert.assertEquals("Should be equal", 
aclStatusBuilder.build(), + JsonUtil.toAclStatus(json)); + } + + @Test + public void testToJsonFromAclStatus() { + String jsonString = + "{\"AclStatus\":{\"entries\":[\"user:user1:rwx\",\"group::rw-\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}"; + AclStatus.Builder aclStatusBuilder = new AclStatus.Builder(); + aclStatusBuilder.owner("testuser"); + aclStatusBuilder.group("supergroup"); + aclStatusBuilder.stickyBit(false); + + List aclSpec = + Lists.newArrayList(aclEntry(ACCESS, USER,"user1", ALL), + aclEntry(ACCESS, GROUP, READ_WRITE)); + + aclStatusBuilder.addEntries(aclSpec); + Assert.assertEquals(jsonString, + JsonUtil.toJsonString(aclStatusBuilder.build())); + + } private void checkDecodeFailure(Map map) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java new file mode 100644 index 00000000000..2b14fe119be --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web; + +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest; +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +/** + * Tests ACL APIs via WebHDFS. + */ +public class TestWebHDFSAcl extends FSAclBaseTest { + + @BeforeClass + public static void init() throws Exception { + conf = WebHdfsTestUtil.createConf(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + } + + /** + * We need to skip this test on WebHDFS, because WebHDFS currently cannot + * resolve symlinks. + */ + @Override + @Test + @Ignore + public void testDefaultAclNewSymlinkIntermediate() { + } + + /** + * Overridden to provide a WebHdfsFileSystem wrapper for the super-user. + * + * @return WebHdfsFileSystem for super-user + * @throws Exception if creation fails + */ + @Override + protected WebHdfsFileSystem createFileSystem() throws Exception { + return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + } + + /** + * Overridden to provide a WebHdfsFileSystem wrapper for a specific user. 
+ * + * @param user UserGroupInformation specific user + * @return WebHdfsFileSystem for specific user + * @throws Exception if creation fails + */ + @Override + protected WebHdfsFileSystem createFileSystem(UserGroupInformation user) + throws Exception { + return WebHdfsTestUtil.getWebHdfsFileSystemAs(user, conf, + WebHdfsFileSystem.SCHEME); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java index 8818f319655..42da919ff2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java @@ -21,12 +21,14 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import java.util.Arrays; +import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.util.StringUtils; @@ -300,4 +302,48 @@ public class TestParam { UserParam.setUserPatternDomain(oldDomain); } + + @Test + public void testAclPermissionParam() { + final AclPermissionParam p = + new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx"); + List setAclList = + AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx", + true); + Assert.assertEquals(setAclList.toString(), p.getAclPermission(true) + .toString()); + + new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx"); + try { + new AclPermissionParam("user::rw--,group::rwx-,other::rw-"); + Assert.fail(); + } catch (IllegalArgumentException e) { + LOG.info("EXPECTED: " + e); + } + + new AclPermissionParam( + "user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx"); + + try { + new AclPermissionParam("user:r-,group:rwx,other:rw-"); + Assert.fail(); + } catch (IllegalArgumentException e) { + LOG.info("EXPECTED: " + e); + } + + try { + new AclPermissionParam("default:::r-,default:group::rwx,other::rw-"); + Assert.fail(); + } catch (IllegalArgumentException e) { + LOG.info("EXPECTED: " + e); + } + + try { + new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx"); + Assert.fail(); + } catch (IllegalArgumentException e) { + LOG.info("EXPECTED: " + e); + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java index 6683e33e0b4..13a9610a346 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.security; +import static org.apache.hadoop.fs.permission.AclEntryScope.*; +import static org.apache.hadoop.fs.permission.AclEntryType.*; +import static org.apache.hadoop.fs.permission.FsAction.*; +import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; import static org.junit.Assert.assertEquals; 
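As a companion to the AclPermissionParam cases above, a small sketch of how an ACL spec string is parsed into AclEntry objects. The spec string is illustrative; the boolean argument indicates whether each entry is expected to carry permission bits.

import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;

public class AclSpecParseSketch {
  public static void main(String[] args) {
    // "true" means each entry must carry permission bits, as with setfacl -m / --set.
    List<AclEntry> entries = AclEntry.parseAclSpec(
        "user::rwx,user:user1:rw-,group::r--,other::---", true);
    for (AclEntry e : entries) {
      System.out.println(e);  // prints entries such as user:user1:rw-
    }
    // Malformed specs (for example "user::rw--") throw IllegalArgumentException,
    // which is what the negative TestParam cases assert.
  }
}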
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -24,6 +28,7 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.util.Arrays; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -66,6 +71,7 @@ public class TestPermissionSymlinks { @BeforeClass public static void beforeClassSetUp() throws Exception { conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); conf.set(FsPermission.UMASK_LABEL, "000"); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); @@ -101,8 +107,43 @@ public class TestPermissionSymlinks { @Test(timeout = 5000) public void testDelete() throws Exception { - // Try to delete where the symlink's parent dir is not writable fs.setPermission(linkParent, new FsPermission((short) 0555)); + doDeleteLinkParentNotWritable(); + + fs.setPermission(linkParent, new FsPermission((short) 0777)); + fs.setPermission(targetParent, new FsPermission((short) 0555)); + fs.setPermission(target, new FsPermission((short) 0555)); + doDeleteTargetParentAndTargetNotWritable(); + } + + @Test + public void testAclDelete() throws Exception { + fs.setAcl(linkParent, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + doDeleteLinkParentNotWritable(); + + fs.setAcl(linkParent, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + fs.setAcl(targetParent, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + fs.setAcl(target, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + doDeleteTargetParentAndTargetNotWritable(); + } + + private void doDeleteLinkParentNotWritable() throws Exception { + // Try to delete where the symlink's parent dir is not writable try { user.doAs(new PrivilegedExceptionAction() { @Override @@ -116,11 +157,11 @@ public class TestPermissionSymlinks { } catch (AccessControlException e) { GenericTestUtils.assertExceptionContains("Permission denied", e); } + } + + private void doDeleteTargetParentAndTargetNotWritable() throws Exception { // Try a delete where the symlink parent dir is writable, // but the target's parent and target are not - fs.setPermission(linkParent, new FsPermission((short) 0777)); - fs.setPermission(targetParent, new FsPermission((short) 0555)); - fs.setPermission(target, new FsPermission((short) 0555)); user.doAs(new PrivilegedExceptionAction() { @Override public Object run() throws IOException { @@ -139,6 +180,20 @@ public class TestPermissionSymlinks { @Test(timeout = 5000) public void testReadWhenTargetNotReadable() throws Exception { fs.setPermission(target, new FsPermission((short) 0000)); + doReadTargetNotReadable(); + } + + @Test + public void testAclReadTargetNotReadable() throws Exception { + fs.setAcl(target, Arrays.asList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, user.getUserName(), NONE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, READ))); + doReadTargetNotReadable(); + } + + private void doReadTargetNotReadable() throws Exception { try 
{ user.doAs(new PrivilegedExceptionAction() { @Override @@ -157,8 +212,22 @@ public class TestPermissionSymlinks { @Test(timeout = 5000) public void testFileStatus() throws Exception { - // Try to getFileLinkStatus the link when the target is not readable fs.setPermission(target, new FsPermission((short) 0000)); + doGetFileLinkStatusTargetNotReadable(); + } + + @Test + public void testAclGetFileLinkStatusTargetNotReadable() throws Exception { + fs.setAcl(target, Arrays.asList( + aclEntry(ACCESS, USER, READ_WRITE), + aclEntry(ACCESS, USER, user.getUserName(), NONE), + aclEntry(ACCESS, GROUP, READ), + aclEntry(ACCESS, OTHER, READ))); + doGetFileLinkStatusTargetNotReadable(); + } + + private void doGetFileLinkStatusTargetNotReadable() throws Exception { + // Try to getFileLinkStatus the link when the target is not readable user.doAs(new PrivilegedExceptionAction() { @Override public Object run() throws IOException { @@ -176,9 +245,28 @@ public class TestPermissionSymlinks { @Test(timeout = 5000) public void testRenameLinkTargetNotWritableFC() throws Exception { - // Rename the link when the target and parent are not writable fs.setPermission(target, new FsPermission((short) 0555)); fs.setPermission(targetParent, new FsPermission((short) 0555)); + doRenameLinkTargetNotWritableFC(); + } + + @Test + public void testAclRenameTargetNotWritableFC() throws Exception { + fs.setAcl(target, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + fs.setAcl(targetParent, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + doRenameLinkTargetNotWritableFC(); + } + + private void doRenameLinkTargetNotWritableFC() throws Exception { + // Rename the link when the target and parent are not writable user.doAs(new PrivilegedExceptionAction() { @Override public Object run() throws IOException { @@ -197,8 +285,22 @@ public class TestPermissionSymlinks { @Test(timeout = 5000) public void testRenameSrcNotWritableFC() throws Exception { - // Rename the link when the target and parent are not writable fs.setPermission(linkParent, new FsPermission((short) 0555)); + doRenameSrcNotWritableFC(); + } + + @Test + public void testAclRenameSrcNotWritableFC() throws Exception { + fs.setAcl(linkParent, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + doRenameSrcNotWritableFC(); + } + + private void doRenameSrcNotWritableFC() throws Exception { + // Rename the link when the target and parent are not writable try { user.doAs(new PrivilegedExceptionAction() { @Override @@ -220,9 +322,28 @@ public class TestPermissionSymlinks { @Test(timeout = 5000) public void testRenameLinkTargetNotWritableFS() throws Exception { - // Rename the link when the target and parent are not writable fs.setPermission(target, new FsPermission((short) 0555)); fs.setPermission(targetParent, new FsPermission((short) 0555)); + doRenameLinkTargetNotWritableFS(); + } + + @Test + public void testAclRenameTargetNotWritableFS() throws Exception { + fs.setAcl(target, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + fs.setAcl(targetParent, Arrays.asList( + aclEntry(ACCESS, USER, ALL), 
+ aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + doRenameLinkTargetNotWritableFS(); + } + + private void doRenameLinkTargetNotWritableFS() throws Exception { + // Rename the link when the target and parent are not writable user.doAs(new PrivilegedExceptionAction() { @Override public Object run() throws IOException { @@ -241,8 +362,22 @@ public class TestPermissionSymlinks { @Test(timeout = 5000) public void testRenameSrcNotWritableFS() throws Exception { - // Rename the link when the target and parent are not writable fs.setPermission(linkParent, new FsPermission((short) 0555)); + doRenameSrcNotWritableFS(); + } + + @Test + public void testAclRenameSrcNotWritableFS() throws Exception { + fs.setAcl(linkParent, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE), + aclEntry(ACCESS, GROUP, ALL), + aclEntry(ACCESS, OTHER, ALL))); + doRenameSrcNotWritableFS(); + } + + private void doRenameSrcNotWritableFS() throws Exception { + // Rename the link when the target and parent are not writable try { user.doAs(new PrivilegedExceptionAction() { @Override @@ -258,6 +393,4 @@ public class TestPermissionSymlinks { GenericTestUtils.assertExceptionContains("Permission denied", e); } } - - } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index 281974d614d76556979dc585352c852764c86185..0a02f610573f9ba97859bccf176f8347dce00ccb 100644 GIT binary patch literal 4329 zcmcIndrVVj6hG~y&{8YlgZZ3S1?LNCnGZyrR{>vWk)hzaXxG67YD>%7qQOU;GMy}g znaw};$EGnu#DAtUGe&e|x*21ZP5;=EnHkA+ZahTM85cF$ea}|zy$}-PX>!lacfRxc zo$s9UefMiP9R3NAjKB{hx;@E177VBFpx1q*%Rc8xS!=vjZ|%Oc{o(d4E8ekktDvXW zbw-otyt%TgTevu}#mihFhQ3I87?fV-_R7wZfb0s%phFzlpJx^xjcPnoPo~FgboB{?&CJZ0>9m2#mN93B z75!|Gc=FSR_iqN5#Wn4Hlv8`<<68*BwB^j(3rz$Jz`!(UhgNiRV4*d=xA+)Z4Ut#{ zl$ox%(!WVArD#PRnt_>p*IKvH#RJ5FhF_1S-yq@P))axU8>yBANK7wv2jr5FKTzYW z^q_Y}{+Mf3hP$!vC$&!##xgLI8pOWw!(U%z=6JPvL-X`j?rm0^v##$<9mTu{sm)*t z`dn4P3V%qubV-+ub-Twfe@p-V5vUB!qz$?I&6Pu=8PwqZptSvm+K(RL6^8_hBgH66 z&lvM0gAcoK-nErCcp{}lPtCnoJZ@m2Io>^MXWJN(lu)&qKhKC`JVSaL zb>XIhU%p~Z2}64u!RV7W=Wq7Os5OgxZDkgQF-$QHbkpV?46Z|w61}#w)i9V%9j;M1 zViMZJOvD3@5v)LvtUy-pvEtdt9o0?i|@7>1tMW$=eUIU$1Kg;jp|HfBvnF3XJeB%o(+!DTmxxu31_20 z=m0?JeDZ^tP{U+__3PsBo!FGK~D()Fxnnn7!hN# z?<`+*i9m%MImj®(SJ$(R=d<6Wx5xzYE>oeDY&U;=%SVqtKv=CekEv(n%k zNMoUas6mPG#lNXjvi>{VCK51!iC9YjG2bu$)4*$$LXE=h+~d_#d4f}w;ghssSJV-= zX&1D7R<}c>&M*mU34OJnUfq9^vqbLohgDbWs#DG9C#+F@8tg4)a&bbP{@|(Kjhhu|!*>$5yi7kj6p@)u6=q z;ypI&_cLdgiUbT`2G$a~CN(D%xAO|6Q=@Qq!|j$;yh2uG_`JvBP>vlc$Fh(P@~Y(;2Cm&BfqXhCJ8{UaisPLn*v2^|8*T0MLdjTCVoihI=bnt z6To?9;Xzc&dtX}e&vBmFeeoCxPY?Ex5KxpSS;qP>kPGx5Hlt3fdFtG|1FGVR0|@j#gDg7PCk5LoJmZE3Qgn9CN9b zj9{T>Ww55w>-Ks2pvD(9M;(K&Q-vSaBwYOHMxGQGkTC{6RquArQT?&9b# zK%gcqBnKn+14lh$LK+KAsEW0b7+-u3WhYKKk|z={0CFXf0OGA3(>=Tc=ToC_zxhaW z|9^H3JDQ2zZptudI)f|g(aS?DM4 zmSyM+#*r!>kI%>+d<^Z3=}oOUI!k9`qgF&L`+ zelI?E#sQfMhG585R;KIT!LG35nAYsQ6{njg-AHA@XGg*KrMpDaEAPqa>Rl6O06e11T=X`#6fGS yeQ)QEF7nAo)0XmuY0b=kU~CJp3pp$+I(Glt$qadULOkE~a>(f_@s{svd+ZL_`(83oC5pt?^dhLZ+hx(Ewi87yX9`gp?ooF(6iAdQ}Wyi z7t_u^SIQEK=Wa_9-kLK`cphJ$luidMzhcua~r_&=H&PtEgXz@0< zHP3pFtHG^ngXH{0EA#4VG^elJUSuy^@Xm^Y{F1za@|kn8W@X#5Gi~KIFx#?br)T1y z4dMgJKJ%V}g$p+~O}z9?*;qFsM|Ed@TxzBi07kB%AJ3i**0Zf;@6E?>s6>W=MYEvm zT^qDYhF20Jb5UdidfdRDQo!NRvO3=Id{Jm+e96lWUNdr%iR zT70dQ*CZtL9t4w9YbGQ%0}@FL1Gwk-z~N}4b3revZG_PF8fLcCx(%%p&B89A8IaVg~M0wb9mNkK5@3K 
zR@n*@P2~0Fe(b@=+eRsrh#%uB(vMrbd}|W#9L=<$v#af>GLd(Vk%mBzGhz?B44nYD znlE0Qjp6Rxgvvo6_@JN&b`eS6r>@NLOv=$#QCMa8%44vkxpER?t+sI)5Qv>hI>!fO+`~I ziEdT~XIr*6DZ~_IGWn=@ZU1+3_iceZg>)ivPjrsoFOa8(Bh&a*M5Yyh*bFFC0jsuD zUsOmp$~5v(0V2=USWXL`>7)~R9#(vGeYfCA>yO7sd3uP2ia;Tr`_`<>qS9uNVJiJ( zv2>9ni=xQ*c}yci5VBw_D2fQd{-6_K{SRf;FgfdLE}I8nWeqhdEJ&LuveGi<9??V? zr405}6j`=w)vYxOB?3q%TE%Bld)%7)B1IW&(C}zo_t`>`qRepbMUh2ekv(EXmU~$F zS7F9rBU%M8Ex&U4ra;bOgNDbuYKI*X$TP!{Y5Zs!MU#)%3@DWR6WhV0kZzQ0@=*cI z8%j@s;F(1_F_%vc#uQx;Ja6>JW1>8diG_+lA)ZP1YW@^OHk%An>0M-@Ebf$KQ4|@! z*J)%3LKch#MG+y`A9NzD|B0*`zVP=HiKodN`6)|^skgs<^2ojZRC-s>9~paL_fX~x z@LwXT31Zl3>Gjvw)i`VWp(f{M>3$BsUlKq-N~xYoO5VpAA%`_oGmOrtc6chSb(+Jg zS@AuNKLb2|t55R=9|@(coC`G(wx|-`SMmdBf%T8PU=h;2t^!yxJ7?62B7Bp20fwjT z$J&FBgn0u5B6gGochpn1qvBp<`}t!7-WrT)bkMX|2VV+T=5ts6x@&FoCbEC9)(7)D zaDa7>S%SYK%k7SO&F-qyjvaIbFOvnF?{9VLv^|1vA@nvFAe^w7uHjdH7W*)ddk+Jd z<4Th1tKA`~h|nrCq-f!*tWu_YlCp%~xsVS=x64&W4z?uVlZDpxH1T(G64%n&UfPo0 z)0E01C - -52 + -53 OP_START_LOG_SEGMENT @@ -13,8 +13,8 @@ 2 1 - 1390942564729 - f270a35a4ebb9984 + 1390519460949 + dc8d30edc97df67d @@ -24,8 +24,8 @@ 3 2 - 1390942564735 - 22391ec22bc0fc20 + 1390519460952 + 096bc20b6debed03 @@ -37,18 +37,18 @@ 16386 /file_create 1 - 1390251365583 - 1390251365583 + 1389828264873 + 1389828264873 512 - DFSClient_NONMAPREDUCE_382541401_1 + DFSClient_NONMAPREDUCE_16108824_1 127.0.0.1 - andrew + jing supergroup 420 - ff209a09-9745-4242-837f-21c6b95a1b70 - 7 + b5928e80-e373-4807-a688-f94483d08ce5 + 9 @@ -59,13 +59,13 @@ 0 /file_create 1 - 1390251365626 - 1390251365583 + 1389828265699 + 1389828264873 512 - andrew + jing supergroup 420 @@ -78,9 +78,9 @@ 0 /file_create /file_moved - 1390251365645 - ff209a09-9745-4242-837f-21c6b95a1b70 - 9 + 1389828265705 + b5928e80-e373-4807-a688-f94483d08ce5 + 11 @@ -89,9 +89,9 @@ 7 0 /file_moved - 1390251365666 - ff209a09-9745-4242-837f-21c6b95a1b70 - 10 + 1389828265712 + b5928e80-e373-4807-a688-f94483d08ce5 + 12 @@ -101,9 +101,9 @@ 0 16387 /directory_mkdir - 1390251365693 + 1389828265722 - andrew + jing supergroup 493 @@ -136,8 +136,8 @@ 12 /directory_mkdir snapshot1 - ff209a09-9745-4242-837f-21c6b95a1b70 - 15 + b5928e80-e373-4807-a688-f94483d08ce5 + 17 @@ -147,8 +147,8 @@ /directory_mkdir snapshot1 snapshot2 - ff209a09-9745-4242-837f-21c6b95a1b70 - 16 + b5928e80-e373-4807-a688-f94483d08ce5 + 18 @@ -157,8 +157,8 @@ 14 /directory_mkdir snapshot2 - ff209a09-9745-4242-837f-21c6b95a1b70 - 17 + b5928e80-e373-4807-a688-f94483d08ce5 + 19 @@ -169,18 +169,18 @@ 16388 /file_create 1 - 1390251365804 - 1390251365804 + 1389828265757 + 1389828265757 512 - DFSClient_NONMAPREDUCE_382541401_1 + DFSClient_NONMAPREDUCE_16108824_1 127.0.0.1 - andrew + jing supergroup 420 - ff209a09-9745-4242-837f-21c6b95a1b70 - 18 + b5928e80-e373-4807-a688-f94483d08ce5 + 20 @@ -191,13 +191,13 @@ 0 /file_create 1 - 1390251365815 - 1390251365804 + 1389828265759 + 1389828265757 512 - andrew + jing supergroup 420 @@ -253,10 +253,10 @@ 0 /file_create /file_moved - 1390251365931 + 1389828265782 NONE - ff209a09-9745-4242-837f-21c6b95a1b70 - 25 + b5928e80-e373-4807-a688-f94483d08ce5 + 27 @@ -267,18 +267,18 @@ 16389 /file_concat_target 1 - 1390251365952 - 1390251365952 + 1389828265787 + 1389828265787 512 - DFSClient_NONMAPREDUCE_382541401_1 + DFSClient_NONMAPREDUCE_16108824_1 127.0.0.1 - andrew + jing supergroup 420 - ff209a09-9745-4242-837f-21c6b95a1b70 - 27 + b5928e80-e373-4807-a688-f94483d08ce5 + 29 @@ -383,8 +383,8 @@ 0 /file_concat_target 1 - 1390251366514 - 1390251365952 + 1389828266540 + 1389828265787 512 @@ -404,7 +404,7 @@ 1003 - andrew + 
jing supergroup 420 @@ -418,18 +418,18 @@ 16390 /file_concat_0 1 - 1390251366533 - 1390251366533 + 1389828266544 + 1389828266544 512 - DFSClient_NONMAPREDUCE_382541401_1 + DFSClient_NONMAPREDUCE_16108824_1 127.0.0.1 - andrew + jing supergroup 420 - ff209a09-9745-4242-837f-21c6b95a1b70 - 40 + b5928e80-e373-4807-a688-f94483d08ce5 + 41 @@ -534,8 +534,8 @@ 0 /file_concat_0 1 - 1390251366726 - 1390251366533 + 1389828266569 + 1389828266544 512 @@ -555,7 +555,7 @@ 1006 - andrew + jing supergroup 420 @@ -569,18 +569,18 @@ 16391 /file_concat_1 1 - 1390251366746 - 1390251366746 + 1389828266572 + 1389828266572 512 - DFSClient_NONMAPREDUCE_382541401_1 + DFSClient_NONMAPREDUCE_16108824_1 127.0.0.1 - andrew + jing supergroup 420 - ff209a09-9745-4242-837f-21c6b95a1b70 - 52 + b5928e80-e373-4807-a688-f94483d08ce5 + 53 @@ -685,8 +685,8 @@ 0 /file_concat_1 1 - 1390251366795 - 1390251366746 + 1389828266599 + 1389828266572 512 @@ -706,7 +706,7 @@ 1009 - andrew + jing supergroup 420 @@ -718,13 +718,13 @@ 56 0 /file_concat_target - 1390251366802 + 1389828266603 /file_concat_0 /file_concat_1 - ff209a09-9745-4242-837f-21c6b95a1b70 - 63 + b5928e80-e373-4807-a688-f94483d08ce5 + 64 @@ -735,15 +735,15 @@ 16392 /file_symlink /file_concat_target - 1390251366811 - 1390251366811 + 1389828266633 + 1389828266633 - andrew + jing supergroup 511 - ff209a09-9745-4242-837f-21c6b95a1b70 - 64 + b5928e80-e373-4807-a688-f94483d08ce5 + 66 @@ -754,18 +754,18 @@ 16393 /hard-lease-recovery-test 1 - 1390251366819 - 1390251366819 + 1389828266637 + 1389828266637 512 - DFSClient_NONMAPREDUCE_382541401_1 + DFSClient_NONMAPREDUCE_16108824_1 127.0.0.1 - andrew + jing supergroup 420 - ff209a09-9745-4242-837f-21c6b95a1b70 - 65 + b5928e80-e373-4807-a688-f94483d08ce5 + 67 @@ -821,23 +821,7 @@ OP_REASSIGN_LEASE 64 - DFSClient_NONMAPREDUCE_382541401_1 - /hard-lease-recovery-test - HDFS_NameNode - - - - OP_SET_GENSTAMP_V2 - - 65 - 1012 - - - - OP_REASSIGN_LEASE - - 66 - HDFS_NameNode + DFSClient_NONMAPREDUCE_16108824_1 /hard-lease-recovery-test HDFS_NameNode @@ -845,23 +829,23 @@ OP_CLOSE - 67 + 65 0 0 /hard-lease-recovery-test 1 - 1390251371402 - 1390251366819 + 1389828269751 + 1389828266637 512 1073741834 11 - 1012 + 1011 - andrew + jing supergroup 420 @@ -870,72 +854,79 @@ OP_ADD_CACHE_POOL - 68 + 66 pool1 - andrew - andrew + jing + staff 493 9223372036854775807 2305843009213693951 - ff209a09-9745-4242-837f-21c6b95a1b70 - 73 + b5928e80-e373-4807-a688-f94483d08ce5 + 74 OP_MODIFY_CACHE_POOL - 69 + 67 pool1 99 - ff209a09-9745-4242-837f-21c6b95a1b70 - 74 + b5928e80-e373-4807-a688-f94483d08ce5 + 75 OP_ADD_CACHE_DIRECTIVE - 70 + 68 1 /path 1 pool1 - 2305844399465065912 - ff209a09-9745-4242-837f-21c6b95a1b70 - 75 + 2305844399041964876 + b5928e80-e373-4807-a688-f94483d08ce5 + 76 OP_MODIFY_CACHE_DIRECTIVE - 71 + 69 1 2 - ff209a09-9745-4242-837f-21c6b95a1b70 - 76 + b5928e80-e373-4807-a688-f94483d08ce5 + 77 OP_REMOVE_CACHE_DIRECTIVE - 72 + 70 1 - ff209a09-9745-4242-837f-21c6b95a1b70 - 77 + b5928e80-e373-4807-a688-f94483d08ce5 + 78 OP_REMOVE_CACHE_POOL - 73 + 71 pool1 - ff209a09-9745-4242-837f-21c6b95a1b70 - 78 + b5928e80-e373-4807-a688-f94483d08ce5 + 79 OP_END_LOG_SEGMENT - 74 + 72 + + + + OP_SET_ACL + + 73 + /file_set_acl diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml new file mode 100644 index 00000000000..c01c56d46a3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml @@ -0,0 +1,976 @@ + + + + + + + + test + + + + + + 
getfacl: basic permissions + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -getfacl /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rw- + + + SubstringComparator + group::r-- + + + SubstringComparator + other::r-- + + + + + getfacl: basic permissions for directory + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm /dir1 + + + + SubstringComparator + # file: /dir1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + SubstringComparator + group::r-x + + + SubstringComparator + other::r-x + + + + + setfacl : Add an ACL + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfacl -m user:bob:r-- /file1 + -fs NAMENODE -getfacl /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rw- + + + SubstringComparator + user:bob:r-- + + + SubstringComparator + group::r-- + + + SubstringComparator + mask::r-- + + + SubstringComparator + other::r-- + + + + + setfacl : Add multiple ACLs at once + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /file1 + -fs NAMENODE -getfacl /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rw- + + + SubstringComparator + user:bob:r-- + + + SubstringComparator + group::r-- + + + SubstringComparator + group:users:r-x + + + SubstringComparator + mask::r-x + + + SubstringComparator + other::r-- + + + + + setfacl : Remove an ACL + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfacl -m user:bob:r--,user:charlie:r-x /file1 + -fs NAMENODE -setfacl -x user:bob /file1 + -fs NAMENODE -getfacl /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rw- + + + SubstringComparator + user:charlie:r-x + + + SubstringComparator + group::r-- + + + SubstringComparator + other::r-- + + + RegexpAcrossOutputComparator + .*(?!bob)* + + + + + setfacl : Add default ACL + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1 + -fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + SubstringComparator + # file: /dir1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + SubstringComparator + user:bob:r-- + + + SubstringComparator + group::r-x + + + SubstringComparator + group:users:r-x + + + SubstringComparator + mask::r-x + + + SubstringComparator + other::r-x + + + SubstringComparator + default:user::rwx + + + SubstringComparator + default:user:charlie:r-x + + + SubstringComparator + default:group::r-x + + + SubstringComparator + default:group:admin:rwx + + + SubstringComparator + default:mask::rwx + + + SubstringComparator + default:other::r-x + + + + + setfacl : try adding default ACL to file + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfacl -m default:user:charlie:r-x /file1 + + + -fs NAMENODE -rm /file1 + + + 
+ SubstringComparator + setfacl: Invalid ACL: only directories may have a default ACL + + + + + setfacl : Remove one default ACL + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1 + -fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1 + -fs NAMENODE -setfacl -x default:user:charlie /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + SubstringComparator + # file: /dir1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + SubstringComparator + user:bob:r-- + + + SubstringComparator + group::r-x + + + SubstringComparator + group:users:r-x + + + SubstringComparator + mask::r-x + + + SubstringComparator + other::r-x + + + SubstringComparator + default:user::rwx + + + SubstringComparator + default:group::r-x + + + SubstringComparator + default:group:admin:rwx + + + SubstringComparator + default:mask::rwx + + + SubstringComparator + default:other::r-x + + + RegexpAcrossOutputComparator + .*(?!default:user:charlie).* + + + + + setfacl : Remove all default ACL + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1 + -fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1 + -fs NAMENODE -setfacl -k /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + SubstringComparator + # file: /dir1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + SubstringComparator + user:bob:r-- + + + SubstringComparator + group::r-x + + + SubstringComparator + group:users:r-x + + + SubstringComparator + mask::r-x + + + SubstringComparator + other::r-x + + + RegexpAcrossOutputComparator + .*(?!default).* + + + + + setfacl : Remove all but base ACLs for a directory + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m user:charlie:r-x,default:group:admin:rwx /dir1 + -fs NAMENODE -setfacl -b /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + SubstringComparator + # file: /dir1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + SubstringComparator + group::r-x + + + SubstringComparator + other::r-x + + + RegexpAcrossOutputComparator + .*(?!charlie).* + + + RegexpAcrossOutputComparator + .*(?!default).* + + + RegexpAcrossOutputComparator + .*(?!admin).* + + + + + setfacl : Remove all but base ACLs for a file + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /file1 + -fs NAMENODE -setfacl -b /file1 + -fs NAMENODE -getfacl /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rw- + + + SubstringComparator + group::r-- + + + SubstringComparator + other::r-- + + + RegexpAcrossOutputComparator + .*(?!charlie).* + + + RegexpAcrossOutputComparator + .*(?!admin).* + + + + + setfacl : check inherit default ACL to file + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1 + -fs NAMENODE -touchz /dir1/file + -fs NAMENODE -getfacl /dir1/file + + + -fs NAMENODE -rm -R /dir1 + + + + SubstringComparator + # file: /dir1/file + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + 
SubstringComparator + user::rw- + + + SubstringComparator + user:charlie:r-x + + + SubstringComparator + group::r-x + + + SubstringComparator + group:admin:rwx + + + SubstringComparator + mask::r-- + + + SubstringComparator + other::r-- + + + RegexpAcrossOutputComparator + .*(?!default).* + + + + + setfacl : check inherit default ACL to dir + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1 + -fs NAMENODE -mkdir /dir1/dir2 + -fs NAMENODE -getfacl /dir1/dir2 + + + -fs NAMENODE -rm -R /dir1 + + + + SubstringComparator + # file: /dir1/dir2 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + SubstringComparator + user:charlie:r-x + + + SubstringComparator + group::r-x + + + SubstringComparator + group:admin:rwx + + + SubstringComparator + mask::rwx + + + SubstringComparator + default:user::rwx + + + SubstringComparator + default:user:charlie:r-x + + + SubstringComparator + default:group::r-x + + + SubstringComparator + default:group:admin:rwx + + + SubstringComparator + default:mask::rwx + + + SubstringComparator + default:other::r-x + + + SubstringComparator + other::r-x + + + + + getfacl -R : recursive + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1 + -fs NAMENODE -mkdir /dir1/dir2 + -fs NAMENODE -setfacl -m user:user1:r-x,group:users:rwx /dir1/dir2 + -fs NAMENODE -getfacl -R /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + ExactComparator + # file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:user1:r-x#LF#group::r-x#LF#group:users:rwx#LF#mask::rwx#LF#other::r-x#LF##LF# + + + + + setfacl -R : recursive + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -mkdir /dir1/dir2 + -fs NAMENODE -setfacl -R -m user:charlie:r-x,group:admin:rwx /dir1 + -fs NAMENODE -getfacl -R /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + ExactComparator + # file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF# + + + + + setfacl --set : Set full set of ACLs + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1 + -fs NAMENODE -setfacl --set user::rw-,group::r--,other::r--,user:user1:r-x,group:users:rw- /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + ExactComparator + # file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:user1:r-x#LF#group::r--#LF#group:users:rw-#LF#mask::rwx#LF#other::r--#LF##LF# + + + + + setfacl -x mask : remove mask entry along with other ACL entries + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1 + -fs NAMENODE -setfacl -x mask::,user:charlie,group:admin /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + ExactComparator + # file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#group::r-x#LF#other::r-x#LF##LF# + + + + + getfacl: only default ACL + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m default:user:charlie:rwx /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R 
/dir1 + + + + SubstringComparator + # file: /dir1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + SubstringComparator + group::r-x + + + SubstringComparator + other::r-x + + + SubstringComparator + default:user::rwx + + + SubstringComparator + default:user:charlie:rwx + + + SubstringComparator + default:group::r-x + + + SubstringComparator + default:mask::rwx + + + SubstringComparator + default:other::r-x + + + + + getfacl: effective permissions + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfacl -m user:charlie:rwx,group::-wx,group:sales:rwx,mask::r-x,default:user:charlie:rwx,default:group::r-x,default:group:sales:rwx,default:mask::rw- /dir1 + -fs NAMENODE -getfacl /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + SubstringComparator + # file: /dir1 + + + SubstringComparator + # owner: USERNAME + + + SubstringComparator + # group: supergroup + + + SubstringComparator + user::rwx + + + RegexpComparator + ^user:charlie:rwx\t#effective:r-x$ + + + RegexpComparator + ^group::-wx\t#effective:--x$ + + + RegexpComparator + ^group:sales:rwx\t#effective:r-x$ + + + SubstringComparator + mask::r-x + + + SubstringComparator + other::r-x + + + SubstringComparator + default:user::rwx + + + RegexpComparator + ^default:user:charlie:rwx\t#effective:rw-$ + + + RegexpComparator + ^default:group::r-x\t#effective:r--$ + + + RegexpComparator + ^default:group:sales:rwx\t#effective:rw-$ + + + SubstringComparator + default:mask::rw- + + + SubstringComparator + default:other::r-x + + + + + ls: display extended acl marker + + -fs NAMENODE -mkdir -p /dir1/dir2 + -fs NAMENODE -setfacl -m user:charlie:rwx,group::-wx,group:sales:rwx,mask::r-x,default:user:charlie:rwx,default:group::r-x,default:group:sales:rwx,default:mask::rw- /dir1/dir2 + -fs NAMENODE -ls /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^drwxr-xr-x\+( )*-( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir2 + + + + + setfacl: recursive modify entries with mix of files and directories + + -fs NAMENODE -mkdir -p /dir1 + -fs NAMENODE -touchz /dir1/file1 + -fs NAMENODE -mkdir -p /dir1/dir2 + -fs NAMENODE -touchz /dir1/dir2/file2 + -fs NAMENODE -setfacl -R -m user:charlie:rwx,default:user:charlie:r-x /dir1 + -fs NAMENODE -getfacl -R /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + ExactComparator + # file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF# + + + + + setfacl: recursive remove entries with mix of files and directories + + -fs NAMENODE -mkdir -p /dir1 + -fs NAMENODE -touchz /dir1/file1 + -fs NAMENODE -mkdir -p /dir1/dir2 + -fs NAMENODE -touchz /dir1/dir2/file2 + -fs NAMENODE -setfacl -R -m 
user:bob:rwx,user:charlie:rwx,default:user:bob:rwx,default:user:charlie:r-x /dir1 + -fs NAMENODE -setfacl -R -x user:bob,default:user:bob /dir1 + -fs NAMENODE -getfacl -R /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + ExactComparator + # file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF# + + + + + setfacl: recursive set with mix of files and directories + + -fs NAMENODE -mkdir -p /dir1 + -fs NAMENODE -touchz /dir1/file1 + -fs NAMENODE -mkdir -p /dir1/dir2 + -fs NAMENODE -touchz /dir1/dir2/file2 + -fs NAMENODE -setfacl -R --set user::rwx,user:charlie:rwx,group::r-x,other::r-x,default:user:charlie:r-x /dir1 + -fs NAMENODE -getfacl -R /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + ExactComparator + # file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF##LF# + + + + +
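The testAclCLI.xml cases above drive the new getfacl and setfacl commands through the CLI test harness. The sketch below shows an equivalent programmatic invocation via FsShell; the /dir1 path and the entries are hypothetical and mirror the "Add default ACL" case.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class AclShellSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FsShell shell = new FsShell(conf);

    // Mirror the "setfacl : Add default ACL" case: add a named access entry
    // and a default entry on a directory, then print the resulting ACL.
    ToolRunner.run(shell, new String[] {"-mkdir", "/dir1"});
    ToolRunner.run(shell, new String[] {
        "-setfacl", "-m", "user:bob:r--,default:user:charlie:r-x", "/dir1"});
    ToolRunner.run(shell, new String[] {"-getfacl", "/dir1"});
  }
}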