diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
index b65b7a0b438..b9def6447a8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
@@ -146,7 +146,9 @@ public class AclEntry {
      * @return Builder this builder, for call chaining
      */
     public Builder setName(String name) {
-      this.name = name;
+      if (name != null && !name.isEmpty()) {
+        this.name = name;
+      }
       return this;
     }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
index 4a7258f0a27..9d7500a697b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 /**
@@ -36,6 +37,7 @@ public class AclStatus {
   private final String group;
   private final boolean stickyBit;
   private final List<AclEntry> entries;
+  private final FsPermission permission;
 
   /**
    * Returns the file owner.
@@ -73,6 +75,14 @@ public class AclStatus {
     return entries;
   }
 
+  /**
+   * Returns the permission set for the path
+   * @return {@link FsPermission} for the path
+   */
+  public FsPermission getPermission() {
+    return permission;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (o == null) {
@@ -113,6 +123,7 @@ public class AclStatus {
     private String group;
     private boolean stickyBit;
     private List<AclEntry> entries = Lists.newArrayList();
+    private FsPermission permission = null;
 
     /**
      * Sets the file owner.
@@ -172,13 +183,22 @@ public class AclStatus {
       return this;
     }
 
+    /**
+     * Sets the permission for the file.
+     * @param permission
+     */
+    public Builder setPermission(FsPermission permission) {
+      this.permission = permission;
+      return this;
+    }
+
     /**
      * Builds a new AclStatus populated with the set properties.
      *
      * @return AclStatus new AclStatus
      */
     public AclStatus build() {
-      return new AclStatus(owner, group, stickyBit, entries);
+      return new AclStatus(owner, group, stickyBit, entries, permission);
     }
   }
 
@@ -190,12 +210,67 @@ public class AclStatus {
    * @param group String file group
    * @param stickyBit the sticky bit
    * @param entries the ACL entries
+   * @param permission permission of the path
    */
   private AclStatus(String owner, String group, boolean stickyBit,
-      Iterable<AclEntry> entries) {
+      Iterable<AclEntry> entries, FsPermission permission) {
     this.owner = owner;
     this.group = group;
     this.stickyBit = stickyBit;
     this.entries = Lists.newArrayList(entries);
+    this.permission = permission;
+  }
+
+  /**
+   * Get the effective permission for the AclEntry
+   * @param entry AclEntry to get the effective action
+   */
+  public FsAction getEffectivePermission(AclEntry entry) {
+    return getEffectivePermission(entry, permission);
+  }
+
+  /**
+   * Get the effective permission for the AclEntry.
+   * Recommended to use this API ONLY if the client communicates with an old
+   * NameNode and therefore needs to pass the permission for the path to get
+   * the effective permission; otherwise use
+   * {@link AclStatus#getEffectivePermission(AclEntry)}.
+   * @param entry AclEntry to get the effective action
+   * @param permArg Permission for the path. If the client is NOT
+   *          communicating with an old NameNode, this argument is ignored and
+   *          the permission returned by the server takes priority.
+   * @return the effective permission for the entry
+   * @throws IllegalArgumentException if the client is communicating with an
+   *           old NameNode and the permission is not passed as an argument
+   */
+  public FsAction getEffectivePermission(AclEntry entry, FsPermission permArg)
+      throws IllegalArgumentException {
+    // At least one set of permission bits must be available.
+    Preconditions.checkArgument(this.permission != null || permArg != null,
+        "Permission bits are not available to calculate effective permission");
+    if (this.permission != null) {
+      // Permission bits from the server response take priority for accuracy.
+      permArg = this.permission;
+    }
+    if ((entry.getName() != null || entry.getType() == AclEntryType.GROUP)) {
+      if (entry.getScope() == AclEntryScope.ACCESS) {
+        FsAction entryPerm = entry.getPermission();
+        return entryPerm.and(permArg.getGroupAction());
+      } else {
+        Preconditions.checkArgument(this.entries.contains(entry)
+            && this.entries.size() >= 3,
+            "Passed default ACL entry not found in the list of ACLs");
+        // The default mask entry used for the effective permission calculation
+        // is the penultimate entry. For extended ACLs this is the mask entry;
+        // for a minimal ACL it is the owner group entry, and intersecting the
+        // group FsAction with itself is a no-op.
+        FsAction defaultMask = this.entries.get(this.entries.size() - 2)
+            .getPermission();
+        FsAction entryPerm = entry.getPermission();
+        return entryPerm.and(defaultMask);
+      }
+    } else {
+      return entry.getPermission();
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
index 206576cfa54..d139ebadce3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
@@ -86,22 +86,26 @@ class AclCommands extends FsCommand {
           (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
       }
 
-      List<AclEntry> entries = perm.getAclBit() ?
-        item.fs.getAclStatus(item.path).getEntries() :
-        Collections.<AclEntry>emptyList();
+      AclStatus aclStatus = item.fs.getAclStatus(item.path);
+      List<AclEntry> entries = perm.getAclBit() ? aclStatus.getEntries()
+          : Collections.<AclEntry> emptyList();
       ScopedAclEntries scopedEntries = new ScopedAclEntries(
         AclUtil.getAclFromPermAndEntries(perm, entries));
-      printAclEntriesForSingleScope(scopedEntries.getAccessEntries());
-      printAclEntriesForSingleScope(scopedEntries.getDefaultEntries());
+      printAclEntriesForSingleScope(aclStatus, perm,
+          scopedEntries.getAccessEntries());
+      printAclEntriesForSingleScope(aclStatus, perm,
+          scopedEntries.getDefaultEntries());
       out.println();
     }
 
     /**
     * Prints all the ACL entries in a single scope.
-     *
+     * @param aclStatus AclStatus for the path
+     * @param fsPerm FsPermission for the path
     * @param entries  List containing ACL entries of file
     */
-    private void printAclEntriesForSingleScope(List<AclEntry> entries) {
+    private void printAclEntriesForSingleScope(AclStatus aclStatus,
+        FsPermission fsPerm, List<AclEntry> entries) {
       if (entries.isEmpty()) {
         return;
       }
@@ -110,10 +114,8 @@ class AclCommands extends FsCommand {
           out.println(entry);
         }
       } else {
-        // ACL sort order guarantees mask is the second-to-last entry.
-        FsAction maskPerm = entries.get(entries.size() - 2).getPermission();
         for (AclEntry entry: entries) {
-          printExtendedAclEntry(entry, maskPerm);
+          printExtendedAclEntry(aclStatus, fsPerm, entry);
         }
       }
     }
@@ -123,14 +125,16 @@ class AclCommands extends FsCommand {
     * permissions of the entry, then also prints the restricted version as the
     * effective permissions. The mask applies to all named entries and also
     * the unnamed group entry.
-     *
+     * @param aclStatus AclStatus for the path
+     * @param fsPerm FsPermission for the path
     * @param entry  AclEntry extended ACL entry to print
-     * @param maskPerm  FsAction permissions in the ACL's mask entry
     */
-    private void printExtendedAclEntry(AclEntry entry, FsAction maskPerm) {
+    private void printExtendedAclEntry(AclStatus aclStatus,
+        FsPermission fsPerm, AclEntry entry) {
       if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) {
         FsAction entryPerm = entry.getPermission();
-        FsAction effectivePerm = entryPerm.and(maskPerm);
+        FsAction effectivePerm = aclStatus
+            .getEffectivePermission(entry, fsPerm);
         if (entryPerm != effectivePerm) {
           out.println(String.format("%s\t#effective:%s", entry,
             effectivePerm.SYMBOL));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 769be433084..7fcc8d24715 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -441,6 +441,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7476. Consolidate ACL-related operations to a single class.
     (wheat9 via cnauroth)
 
+    HDFS-7384. 'getfacl' command and 'getAclStatus' output should be in sync.
+    (Vinayakumar B via cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
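Editorial illustration, not part of the patch: the sketch below shows how a client holding an AclStatus from FileSystem#getAclStatus could use the new getEffectivePermission API to reproduce the "#effective:" annotation that the updated getfacl command prints. The class, method, and variable names other than the Hadoop APIs are hypothetical.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

public class EffectivePermissionExample {
  // Prints each ACL entry and, where the mask restricts it, the effective
  // permission, mirroring the "#effective:" output of getfacl.
  public static void printEffectivePermissions(FileSystem fs, Path path)
      throws IOException {
    AclStatus aclStatus = fs.getAclStatus(path);
    for (AclEntry entry : aclStatus.getEntries()) {
      // The mask applies only to named entries and the unnamed group entry.
      if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) {
        // Against a new NameNode the permission travels inside AclStatus, so
        // the single-argument overload is enough; against an old NameNode the
        // two-argument overload with the path's FsPermission is required.
        FsAction effective = aclStatus.getEffectivePermission(entry);
        System.out.println(entry + "\t#effective:" + effective.SYMBOL);
      } else {
        System.out.println(entry);
      }
    }
  }
}

For an entry such as user:bruce:rwx restricted by a group action of r--, this prints the entry followed by "#effective:r--", matching the shell output produced by the AclCommands change above.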
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 2a5edc8dc94..5a3658573c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -2278,15 +2278,24 @@ public class PBHelper {
 
   public static AclStatus convert(GetAclStatusResponseProto e) {
     AclStatusProto r = e.getResult();
-    return new AclStatus.Builder().owner(r.getOwner()).group(r.getGroup())
-        .stickyBit(r.getSticky())
-        .addEntries(convertAclEntry(r.getEntriesList())).build();
+    AclStatus.Builder builder = new AclStatus.Builder();
+    builder.owner(r.getOwner()).group(r.getGroup()).stickyBit(r.getSticky())
+        .addEntries(convertAclEntry(r.getEntriesList()));
+    if (r.hasPermission()) {
+      builder.setPermission(convert(r.getPermission()));
+    }
+    return builder.build();
   }
 
   public static GetAclStatusResponseProto convert(AclStatus e) {
-    AclStatusProto r = AclStatusProto.newBuilder().setOwner(e.getOwner())
+    AclStatusProto.Builder builder = AclStatusProto.newBuilder();
+    builder.setOwner(e.getOwner())
         .setGroup(e.getGroup()).setSticky(e.isStickyBit())
-        .addAllEntries(convertAclEntryProto(e.getEntries())).build();
+        .addAllEntries(convertAclEntryProto(e.getEntries()));
+    if (e.getPermission() != null) {
+      builder.setPermission(convert(e.getPermission()));
+    }
+    AclStatusProto r = builder.build();
     return GetAclStatusResponseProto.newBuilder().setResult(r).build();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index ac899aab362..c2dee207c32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -171,9 +171,11 @@ class FSDirAclOp {
       INode inode = FSDirectory.resolveLastINode(srcs, iip);
       int snapshotId = iip.getPathSnapshotId();
       List<AclEntry> acl = AclStorage.readINodeAcl(inode, snapshotId);
+      FsPermission fsPermission = inode.getFsPermission(snapshotId);
       return new AclStatus.Builder()
           .owner(inode.getUserName()).group(inode.getGroupName())
-          .stickyBit(inode.getFsPermission(snapshotId).getStickyBit())
+          .stickyBit(fsPermission.getStickyBit())
+          .setPermission(fsPermission)
           .addEntries(acl).build();
     } finally {
       fsd.readUnlock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index ff665e7798d..a26f1bf6cd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -34,10 +34,12 @@ import java.util.Map;
 import com.google.common.collect.ImmutableList;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.InvalidProtocolBufferException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
@@ -46,6 +48,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
 import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.LimitInputStream;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -314,27 +317,15 @@ class FSImageLoader {
    * @throws IOException if failed to serialize fileStatus to JSON.
    */
   String getAclStatus(String path) throws IOException {
-    StringBuilder sb = new StringBuilder();
-    List<AclEntry> aclEntryList = getAclEntryList(path);
     PermissionStatus p = getPermissionStatus(path);
-    sb.append("{\"AclStatus\":{\"entries\":[");
-    int i = 0;
-    for (AclEntry aclEntry : aclEntryList) {
-      if (i++ != 0) {
-        sb.append(',');
-      }
-      sb.append('"');
-      sb.append(aclEntry.toString());
-      sb.append('"');
-    }
-    sb.append("],\"group\": \"");
-    sb.append(p.getGroupName());
-    sb.append("\",\"owner\": \"");
-    sb.append(p.getUserName());
-    sb.append("\",\"stickyBit\": ");
-    sb.append(p.getPermission().getStickyBit());
-    sb.append("}}\n");
-    return sb.toString();
+    List<AclEntry> aclEntryList = getAclEntryList(path);
+    FsPermission permission = p.getPermission();
+    AclStatus.Builder builder = new AclStatus.Builder();
+    builder.owner(p.getUserName()).group(p.getGroupName())
+        .addEntries(aclEntryList).setPermission(permission)
+        .stickyBit(permission.getStickyBit());
+    AclStatus aclStatus = builder.build();
+    return JsonUtil.toJsonString(aclStatus);
   }
 
   private List<AclEntry> getAclEntryList(String path) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 0a2ae2652db..aa6100cc318 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -655,6 +655,16 @@ public class JsonUtil {
     m.put("group", status.getGroup());
     m.put("stickyBit", status.isStickyBit());
     m.put("entries", status.getEntries());
+    FsPermission perm = status.getPermission();
+    if (perm != null) {
+      m.put("permission", toString(perm));
+      if (perm.getAclBit()) {
+        m.put("aclBit", true);
+      }
+      if (perm.getEncryptedBit()) {
+        m.put("encBit", true);
+      }
+    }
     final Map<String, Map<String, Object>> finalMap =
         new TreeMap<String, Map<String, Object>>();
     finalMap.put(AclStatus.class.getSimpleName(), m);
@@ -673,7 +683,12 @@ public class JsonUtil {
     aclStatusBuilder.owner((String) m.get("owner"));
     aclStatusBuilder.group((String) m.get("group"));
     aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
-
+    String permString = (String) m.get("permission");
+    if (permString != null) {
+      final FsPermission permission = toFsPermission(permString,
+          (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
+      aclStatusBuilder.setPermission(permission);
+    }
     final Object[] entries = (Object[]) m.get("entries");
 
     List<AclEntry> aclEntryList = new ArrayList<AclEntry>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
index e940142e339..57cc8557867 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
@@ -58,6 +58,7 @@ message AclStatusProto {
   required string group = 2;
   required bool sticky = 3;
   repeated AclEntryProto entries = 4;
+  optional FsPermissionProto permission = 5;
 }
 
 message AclEditLogProto {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
index 54cd2ed0a4b..662f8b81236 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
@@ -919,6 +919,7 @@ Transfer-Encoding: chunked
         ],
         "group": "supergroup",
         "owner": "hadoop",
+        "permission":"775",
         "stickyBit": false
     }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index aff133f270d..eda0a28580c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -1317,6 +1317,52 @@ public abstract class FSAclBaseTest {
     }
   }
 
+  @Test
+  public void testEffectiveAccess() throws Exception {
+    Path p1 = new Path("/testEffectiveAccess");
+    fs.mkdirs(p1);
+    // give all access at first
+    fs.setPermission(p1, FsPermission.valueOf("-rwxrwxrwx"));
+    AclStatus aclStatus = fs.getAclStatus(p1);
+    assertEquals("Entries should be empty", 0, aclStatus.getEntries().size());
+    assertEquals("Permission should be carried by AclStatus",
+        fs.getFileStatus(p1).getPermission(), aclStatus.getPermission());
+
+    // Add named entries with all access
+    fs.modifyAclEntries(p1, Lists.newArrayList(
+        aclEntry(ACCESS, USER, "bruce", ALL),
+        aclEntry(ACCESS, GROUP, "groupY", ALL)));
+    aclStatus = fs.getAclStatus(p1);
+    assertEquals("Entries should contain owner group entry also", 3, aclStatus
+        .getEntries().size());
+
+    // restrict the access
+    fs.setPermission(p1, FsPermission.valueOf("-rwxr-----"));
+    // latest permissions should be reflected as effective permission
+    aclStatus = fs.getAclStatus(p1);
+    List<AclEntry> entries = aclStatus.getEntries();
+    for (AclEntry aclEntry : entries) {
+      if (aclEntry.getName() != null || aclEntry.getType() == GROUP) {
+        assertEquals(FsAction.ALL, aclEntry.getPermission());
+        assertEquals(FsAction.READ, aclStatus.getEffectivePermission(aclEntry));
+      }
+    }
+    fsAsBruce.access(p1, READ);
+    try {
+      fsAsBruce.access(p1, WRITE);
+      fail("Access should not be given");
+    } catch (AccessControlException e) {
+      // expected
+    }
+    fsAsBob.access(p1, READ);
+    try {
+      fsAsBob.access(p1, WRITE);
+      fail("Access should not be given");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
   /**
    * Creates a FileSystem for the super-user.
   *
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml
index 21031ad2d20..82a580926a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml
@@ -317,6 +317,59 @@
         </comparator>
       </comparators>
     </test>
+    <test>
+      <description>setfacl : Add minimal default ACL</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -setfacl -m default:user::rwx /dir1</command>
+        <command>-fs NAMENODE -getfacl /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -R /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output># file: /dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output># owner: USERNAME</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output># group: supergroup</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>user::rwx</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>group::r-x</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>other::r-x</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>default:user::rwx</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>default:group::r-x</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>default:other::r-x</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpAcrossOutputComparator</type>
+          <expected-output>.*(?!default\:mask)*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
     <test>
      <description>setfacl : try adding default ACL to file</description>