HDFS-4685. Merge HDFS ACLs to branch-2. Merging changes r1569870, r1570466, r1570655, r1571573, r1571745, r1572142, r1572189 and r1572190 from trunk to branch-2.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1572308 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth 2014-02-26 22:32:27 +00:00
parent 39922a7b4c
commit 2244459186
94 changed files with 11380 additions and 433 deletions

View File

@ -25,8 +25,6 @@
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
@ -40,7 +38,6 @@
import java.util.Set;
import java.util.Stack;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
@ -51,6 +48,8 @@
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.io.Text;
@ -2270,6 +2269,88 @@ public void deleteSnapshot(Path path, String snapshotName)
+ " doesn't support deleteSnapshot");
}
/**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
* @throws IOException if an ACL could not be modified
*/
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support modifyAclEntries");
}
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing entries to remove
* @throws IOException if an ACL could not be modified
*/
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAclEntries");
}
/**
* Removes all default ACL entries from files and directories.
*
* @param path Path to modify
* @throws IOException if an ACL could not be modified
*/
public void removeDefaultAcl(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeDefaultAcl");
}
/**
* Removes all but the base ACL entries of files and directories. The entries
* for user, group, and others are retained for compatibility with permission
* bits.
*
* @param path Path to modify
* @throws IOException if an ACL could not be removed
*/
public void removeAcl(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAcl");
}
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications, must include entries
* for user, group, and others for compatibility with permission bits.
* @throws IOException if an ACL could not be modified
*/
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setAcl");
}
/**
* Gets the ACL of a file or directory.
*
* @param path Path to get
* @return AclStatus describing the ACL of the file or directory
* @throws IOException if an ACL could not be read
*/
public AclStatus getAclStatus(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getAclStatus");
}
// making it volatile to be able to do double-checked locking
private volatile static boolean FILE_SYSTEMS_LOADED = false;
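As a rough illustration, a caller could exercise the new ACL methods above as in the sketch below. This is a hypothetical snippet (the AclDemo class, path, and user name are invented here); any FileSystem that keeps the default implementations simply throws UnsupportedOperationException.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

public class AclDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/acl-demo"); // hypothetical path

    // Merge a named-user entry into the file's existing ACL.
    List<AclEntry> spec = AclEntry.parseAclSpec("user:bob:rw-", true);
    fs.modifyAclEntries(path, spec);

    // Read the ACL back and print owner, group, and entries.
    AclStatus status = fs.getAclStatus(path);
    System.out.println(status);

    // Strip everything except the base user/group/other entries.
    fs.removeAcl(path);
  }
}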

View File

@ -22,9 +22,13 @@
import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Options.ChecksumOpt;
@ -507,4 +511,36 @@ public void deleteSnapshot(Path path, String snapshotName)
throws IOException {
fs.deleteSnapshot(path, snapshotName);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fs.modifyAclEntries(path, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fs.removeAclEntries(path, aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
fs.removeDefaultAcl(path);
}
@Override
public void removeAcl(Path path) throws IOException {
fs.removeAcl(path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
fs.setAcl(path, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return fs.getAclStatus(path);
}
}

View File

@ -563,8 +563,10 @@ private void loadPermissionInfo() {
//expected format
//-rw------- 1 username groupname ...
String permission = t.nextToken();
if (permission.length() > 10) { //files with ACLs might have a '+'
permission = permission.substring(0, 10);
if (permission.length() > FsPermission.MAX_PERMISSION_LENGTH) {
//files with ACLs might have a '+'
permission = permission.substring(0,
FsPermission.MAX_PERMISSION_LENGTH);
}
setPermission(FsPermission.valueOf(permission));
t.nextToken();

View File

@ -0,0 +1,301 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import com.google.common.base.Objects;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
/**
* Defines a single entry in an ACL. An ACL entry has a type (user, group,
* mask, or other), an optional name (referring to a specific user or group), a
* set of permissions (any combination of read, write and execute), and a scope
* (access or default). AclEntry instances are immutable. Use a {@link Builder}
* to create a new instance.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AclEntry {
private final AclEntryType type;
private final String name;
private final FsAction permission;
private final AclEntryScope scope;
/**
* Returns the ACL entry type.
*
* @return AclEntryType ACL entry type
*/
public AclEntryType getType() {
return type;
}
/**
* Returns the optional ACL entry name.
*
* @return String ACL entry name, or null if undefined
*/
public String getName() {
return name;
}
/**
* Returns the set of permissions in the ACL entry.
*
* @return FsAction set of permissions in the ACL entry
*/
public FsAction getPermission() {
return permission;
}
/**
* Returns the scope of the ACL entry.
*
* @return AclEntryScope scope of the ACL entry
*/
public AclEntryScope getScope() {
return scope;
}
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (getClass() != o.getClass()) {
return false;
}
AclEntry other = (AclEntry)o;
return Objects.equal(type, other.type) &&
Objects.equal(name, other.name) &&
Objects.equal(permission, other.permission) &&
Objects.equal(scope, other.scope);
}
@Override
public int hashCode() {
return Objects.hashCode(type, name, permission, scope);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
if (scope == AclEntryScope.DEFAULT) {
sb.append("default:");
}
if (type != null) {
sb.append(type.toString().toLowerCase());
}
sb.append(':');
if (name != null) {
sb.append(name);
}
sb.append(':');
if (permission != null) {
sb.append(permission.SYMBOL);
}
return sb.toString();
}
/**
* Builder for creating new AclEntry instances.
*/
public static class Builder {
private AclEntryType type;
private String name;
private FsAction permission;
private AclEntryScope scope = AclEntryScope.ACCESS;
/**
* Sets the ACL entry type.
*
* @param type AclEntryType ACL entry type
* @return Builder this builder, for call chaining
*/
public Builder setType(AclEntryType type) {
this.type = type;
return this;
}
/**
* Sets the optional ACL entry name.
*
* @param name String optional ACL entry name
* @return Builder this builder, for call chaining
*/
public Builder setName(String name) {
this.name = name;
return this;
}
/**
* Sets the set of permissions in the ACL entry.
*
* @param permission FsAction set of permissions in the ACL entry
* @return Builder this builder, for call chaining
*/
public Builder setPermission(FsAction permission) {
this.permission = permission;
return this;
}
/**
* Sets the scope of the ACL entry. If this method is not called, then the
* builder assumes {@link AclEntryScope#ACCESS}.
*
* @param scope AclEntryScope scope of the ACL entry
* @return Builder this builder, for call chaining
*/
public Builder setScope(AclEntryScope scope) {
this.scope = scope;
return this;
}
/**
* Builds a new AclEntry populated with the set properties.
*
* @return AclEntry new AclEntry
*/
public AclEntry build() {
return new AclEntry(type, name, permission, scope);
}
}
/**
* Private constructor.
*
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @param permission FsAction set of permissions in the ACL entry
* @param scope AclEntryScope scope of the ACL entry
*/
private AclEntry(AclEntryType type, String name, FsAction permission, AclEntryScope scope) {
this.type = type;
this.name = name;
this.permission = permission;
this.scope = scope;
}
/**
* Parses a string representation of an ACL spec into a list of AclEntry
* objects. Example: "user::rwx,user:foo:rw-,group::r--,other::---"
*
* @param aclSpec
* String representation of an ACL spec.
* @param includePermission
* true for setAcl operations, meaning the aclSpec must include
* permissions.<br>
* False for removeAcl operations, meaning the aclSpec must not
* contain permissions.<br>
* Example: "user:foo,group:bar"
* @return Returns list of {@link AclEntry} parsed
*/
public static List<AclEntry> parseAclSpec(String aclSpec,
boolean includePermission) {
List<AclEntry> aclEntries = new ArrayList<AclEntry>();
Collection<String> aclStrings = StringUtils.getStringCollection(aclSpec,
",");
for (String aclStr : aclStrings) {
AclEntry aclEntry = parseAclEntry(aclStr, includePermission);
aclEntries.add(aclEntry);
}
return aclEntries;
}
/**
* Parses a string representation of an ACL into a AclEntry object.<br>
*
* @param aclStr
* String representation of an ACL.<br>
* Example: "user:foo:rw-"
* @param includePermission
* true for setAcl operations, meaning the ACL entry must include
* permissions.<br>
* False for removeAcl operations, meaning the ACL entry must not
* contain permissions.<br>
* Example: "user:foo,group:bar,mask::"
* @return Returns an {@link AclEntry} object
*/
public static AclEntry parseAclEntry(String aclStr,
boolean includePermission) {
AclEntry.Builder builder = new AclEntry.Builder();
// Here "::" represent one empty string.
// StringUtils.getStringCollection() will ignore this.
String[] split = aclStr.split(":");
if (split.length == 0) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr);
}
int index = 0;
if ("default".equals(split[0])) {
// default entry
index++;
builder.setScope(AclEntryScope.DEFAULT);
}
if (split.length <= index) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr);
}
AclEntryType aclType = null;
try {
aclType = Enum.valueOf(AclEntryType.class, split[index].toUpperCase());
builder.setType(aclType);
index++;
} catch (IllegalArgumentException iae) {
throw new HadoopIllegalArgumentException(
"Invalid type of acl in <aclSpec> :" + aclStr);
}
if (split.length > index) {
String name = split[index];
if (!name.isEmpty()) {
builder.setName(name);
}
index++;
}
if (includePermission) {
if (split.length <= index) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : "
+ aclStr);
}
String permission = split[index];
FsAction fsAction = FsAction.getFsAction(permission);
if (null == fsAction) {
throw new HadoopIllegalArgumentException(
"Invalid permission in <aclSpec> : " + aclStr);
}
builder.setPermission(fsAction);
index++;
}
if (split.length > index) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr);
}
AclEntry aclEntry = builder.build();
return aclEntry;
}
}
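A small sketch of the Builder and parser defined above in action (the group name is arbitrary):

import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclEntrySketch {
  public static void main(String[] args) {
    // Build a default-scoped named-group entry directly.
    AclEntry entry = new AclEntry.Builder()
        .setScope(AclEntryScope.DEFAULT)
        .setType(AclEntryType.GROUP)
        .setName("staff")
        .setPermission(FsAction.READ_EXECUTE)
        .build();
    System.out.println(entry); // prints "default:group:staff:r-x"

    // Parse the same entry (plus a mask entry) from an ACL spec string.
    List<AclEntry> parsed =
        AclEntry.parseAclSpec("default:group:staff:r-x,mask::r-x", true);
    System.out.println(parsed.get(0).equals(entry)); // true
  }
}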

View File

@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Specifies the scope or intended usage of an ACL entry.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum AclEntryScope {
/**
* An ACL entry that is inspected during permission checks to enforce
* permissions.
*/
ACCESS,
/**
* An ACL entry to be applied to a directory's children that do not otherwise
* have their own ACL defined. Unlike an access ACL entry, a default ACL
* entry is not inspected as part of permission enforcement on the directory
* that owns it.
*/
DEFAULT;
}

View File

@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Specifies the type of an ACL entry.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum AclEntryType {
/**
* An ACL entry applied to a specific user. These ACL entries can be unnamed,
* which applies to the file owner, or named, which applies to the specific
* named user.
*/
USER,
/**
* An ACL entry applied to a specific group. These ACL entries can be
* unnamed, which applies to the file's group, or named, which applies to the
* specific named group.
*/
GROUP,
/**
* An ACL mask entry. Mask entries are unnamed. During permission checks,
* the mask entry interacts with all ACL entries that are members of the group
* class. This consists of all named user entries, the unnamed group entry,
* and all named group entries. For each such entry, any permissions that are
* absent from the mask entry are removed from the effective permissions used
* during the permission check.
*/
MASK,
/**
* An ACL entry that applies to all other users that were not covered by one
* of the more specific ACL entry types.
*/
OTHER;
}
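The mask filtering described for MASK above reduces to an intersection of FsAction values; a minimal sketch (the entries named in the comments are illustrative):

import org.apache.hadoop.fs.permission.FsAction;

public class MaskSketch {
  public static void main(String[] args) {
    FsAction entryPerm = FsAction.ALL;          // e.g. user:bob:rwx
    FsAction maskPerm = FsAction.READ_EXECUTE;  // e.g. mask::r-x

    // Effective permission is the entry's permission filtered by the mask.
    FsAction effective = entryPerm.and(maskPerm);
    System.out.println(effective.SYMBOL); // prints "r-x"
  }
}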

View File

@ -0,0 +1,201 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
/**
* An AclStatus contains the ACL information of a specific file. AclStatus
* instances are immutable. Use a {@link Builder} to create a new instance.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AclStatus {
private final String owner;
private final String group;
private final boolean stickyBit;
private final List<AclEntry> entries;
/**
* Returns the file owner.
*
* @return String file owner
*/
public String getOwner() {
return owner;
}
/**
* Returns the file group.
*
* @return String file group
*/
public String getGroup() {
return group;
}
/**
* Returns the sticky bit.
*
* @return boolean sticky bit
*/
public boolean isStickyBit() {
return stickyBit;
}
/**
* Returns the list of all ACL entries, ordered by their natural ordering.
*
* @return List<AclEntry> unmodifiable ordered list of all ACL entries
*/
public List<AclEntry> getEntries() {
return entries;
}
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (getClass() != o.getClass()) {
return false;
}
AclStatus other = (AclStatus)o;
return Objects.equal(owner, other.owner)
&& Objects.equal(group, other.group)
&& stickyBit == other.stickyBit
&& Objects.equal(entries, other.entries);
}
@Override
public int hashCode() {
return Objects.hashCode(owner, group, stickyBit, entries);
}
@Override
public String toString() {
return new StringBuilder()
.append("owner: ").append(owner)
.append(", group: ").append(group)
.append(", acl: {")
.append("entries: ").append(entries)
.append(", stickyBit: ").append(stickyBit)
.append('}')
.toString();
}
/**
* Builder for creating new Acl instances.
*/
public static class Builder {
private String owner;
private String group;
private boolean stickyBit;
private List<AclEntry> entries = Lists.newArrayList();
/**
* Sets the file owner.
*
* @param owner String file owner
* @return Builder this builder, for call chaining
*/
public Builder owner(String owner) {
this.owner = owner;
return this;
}
/**
* Sets the file group.
*
* @param group String file group
* @return Builder this builder, for call chaining
*/
public Builder group(String group) {
this.group = group;
return this;
}
/**
* Adds an ACL entry.
*
* @param e AclEntry entry to add
* @return Builder this builder, for call chaining
*/
public Builder addEntry(AclEntry e) {
this.entries.add(e);
return this;
}
/**
* Adds a list of ACL entries.
*
* @param entries AclEntry entries to add
* @return Builder this builder, for call chaining
*/
public Builder addEntries(Iterable<AclEntry> entries) {
for (AclEntry e : entries)
this.entries.add(e);
return this;
}
/**
* Sets sticky bit. If this method is not called, then the builder assumes
* false.
*
* @param stickyBit
* boolean sticky bit
* @return Builder this builder, for call chaining
*/
public Builder stickyBit(boolean stickyBit) {
this.stickyBit = stickyBit;
return this;
}
/**
* Builds a new AclStatus populated with the set properties.
*
* @return AclStatus new AclStatus
*/
public AclStatus build() {
return new AclStatus(owner, group, stickyBit, entries);
}
}
/**
* Private constructor.
*
* @param file Path file associated to this ACL
* @param owner String file owner
* @param group String file group
* @param stickyBit the sticky bit
* @param entries the ACL entries
*/
private AclStatus(String owner, String group, boolean stickyBit,
Iterable<AclEntry> entries) {
this.owner = owner;
this.group = group;
this.stickyBit = stickyBit;
this.entries = Lists.newArrayList(entries);
}
}
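A minimal sketch of building an AclStatus with the Builder above (owner, group, and entry values are arbitrary):

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

public class AclStatusSketch {
  public static void main(String[] args) {
    AclStatus status = new AclStatus.Builder()
        .owner("alice")   // hypothetical owner
        .group("staff")   // hypothetical group
        .stickyBit(false)
        .addEntry(new AclEntry.Builder()
            .setType(AclEntryType.USER)
            .setName("bob")
            .setPermission(FsAction.READ)
            .build())
        .build();
    // owner: alice, group: staff, acl: {entries: [user:bob:r--], stickyBit: false}
    System.out.println(status);
  }
}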

View File

@ -23,8 +23,8 @@
/**
* File system actions, e.g. read, write, etc.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum FsAction {
// POSIX style
NONE("---"),
@ -69,4 +69,21 @@ public FsAction or(FsAction that) {
public FsAction not() {
return vals[7 - ordinal()];
}
/**
* Gets the FsAction enum constant for the symbolic string representation
* of a permission.
*
* @param permission
* 3-character symbolic permission string, e.g. "rwx"
* @return the matching FsAction if one exists for the given permission
* string, otherwise null
*/
public static FsAction getFsAction(String permission) {
for (FsAction fsAction : vals) {
if (fsAction.SYMBOL.equals(permission)) {
return fsAction;
}
}
return null;
}
}
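For illustration, the new lookup behaves as follows:

import org.apache.hadoop.fs.permission.FsAction;

public class FsActionLookupSketch {
  public static void main(String[] args) {
    System.out.println(FsAction.getFsAction("rwx")); // ALL
    System.out.println(FsAction.getFsAction("r--")); // READ
    System.out.println(FsAction.getFsAction("bad")); // null, no match
  }
}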

View File

@ -48,6 +48,9 @@ public class FsPermission implements Writable {
WritableFactories.setFactory(ImmutableFsPermission.class, FACTORY);
}
/** Maximum acceptable length of a permission string to parse */
public static final int MAX_PERMISSION_LENGTH = 10;
/** Create an immutable {@link FsPermission} object. */
public static FsPermission createImmutable(short permission) {
return new ImmutableFsPermission(permission);
@ -319,9 +322,10 @@ public static FsPermission valueOf(String unixSymbolicPermission) {
if (unixSymbolicPermission == null) {
return null;
}
else if (unixSymbolicPermission.length() != 10) {
throw new IllegalArgumentException("length != 10(unixSymbolicPermission="
+ unixSymbolicPermission + ")");
else if (unixSymbolicPermission.length() != MAX_PERMISSION_LENGTH) {
throw new IllegalArgumentException(String.format(
"length != %d(unixSymbolicPermission=%s)", MAX_PERMISSION_LENGTH,
unixSymbolicPermission));
}
int n = 0;
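A sketch of the length contract enforced by valueOf above (the permission strings are arbitrary):

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionParseSketch {
  public static void main(String[] args) {
    // Exactly MAX_PERMISSION_LENGTH (10) characters: type flag + 9 mode bits.
    FsPermission p = FsPermission.valueOf("-rwxr-xr-x");
    System.out.println(Integer.toOctalString(p.toShort())); // 755

    // Anything shorter or longer is rejected.
    try {
      FsPermission.valueOf("rwxr-xr-x"); // 9 chars, missing type flag
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}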

View File

@ -0,0 +1,325 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
/**
* Acl related operations
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
class AclCommands extends FsCommand {
private static String GET_FACL = "getfacl";
private static String SET_FACL = "setfacl";
public static void registerCommands(CommandFactory factory) {
factory.addClass(GetfaclCommand.class, "-" + GET_FACL);
factory.addClass(SetfaclCommand.class, "-" + SET_FACL);
}
/**
* Implements the '-getfacl' command for the FsShell.
*/
public static class GetfaclCommand extends FsCommand {
public static String NAME = GET_FACL;
public static String USAGE = "[-R] <path>";
public static String DESCRIPTION = "Displays the Access Control Lists"
+ " (ACLs) of files and directories. If a directory has a default ACL,"
+ " then getfacl also displays the default ACL.\n"
+ "-R: List the ACLs of all files and directories recursively.\n"
+ "<path>: File or directory to list.\n";
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R");
cf.parse(args);
setRecursive(cf.getOpt("R"));
if (args.isEmpty()) {
throw new HadoopIllegalArgumentException("<path> is missing");
}
if (args.size() > 1) {
throw new HadoopIllegalArgumentException("Too many arguments");
}
}
@Override
protected void processPath(PathData item) throws IOException {
AclStatus aclStatus = item.fs.getAclStatus(item.path);
out.println("# file: " + item);
out.println("# owner: " + aclStatus.getOwner());
out.println("# group: " + aclStatus.getGroup());
List<AclEntry> entries = aclStatus.getEntries();
if (aclStatus.isStickyBit()) {
String stickyFlag = "T";
for (AclEntry aclEntry : entries) {
if (aclEntry.getType() == AclEntryType.OTHER
&& aclEntry.getScope() == AclEntryScope.ACCESS
&& aclEntry.getPermission().implies(FsAction.EXECUTE)) {
stickyFlag = "t";
break;
}
}
out.println("# flags: --" + stickyFlag);
}
FsPermission perm = item.stat.getPermission();
if (entries.isEmpty()) {
printMinimalAcl(perm);
} else {
printExtendedAcl(perm, entries);
}
out.println();
}
/**
* Prints an extended ACL, including all extended ACL entries and also the
* base entries implied by the permission bits.
*
* @param perm FsPermission of file
* @param entries List<AclEntry> containing ACL entries of file
*/
private void printExtendedAcl(FsPermission perm, List<AclEntry> entries) {
// Print owner entry implied by owner permission bits.
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build());
// Print all extended access ACL entries.
boolean hasAccessAcl = false;
Iterator<AclEntry> entryIter = entries.iterator();
AclEntry curEntry = null;
while (entryIter.hasNext()) {
curEntry = entryIter.next();
if (curEntry.getScope() == AclEntryScope.DEFAULT) {
break;
}
hasAccessAcl = true;
printExtendedAclEntry(curEntry, perm.getGroupAction());
}
// Print mask entry implied by group permission bits, or print group entry
// if there is no access ACL (only default ACL).
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build());
// Print other entry implied by other bits.
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
// Print default ACL entries.
if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
out.println(curEntry);
// ACL sort order guarantees default mask is the second-to-last entry.
FsAction maskPerm = entries.get(entries.size() - 2).getPermission();
while (entryIter.hasNext()) {
printExtendedAclEntry(entryIter.next(), maskPerm);
}
}
}
/**
* Prints a single extended ACL entry. If the mask restricts the
* permissions of the entry, then also prints the restricted version as the
* effective permissions. The mask applies to all named entries and also
* the unnamed group entry.
*
* @param entry AclEntry extended ACL entry to print
* @param maskPerm FsAction permissions in the ACL's mask entry
*/
private void printExtendedAclEntry(AclEntry entry, FsAction maskPerm) {
if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) {
FsAction entryPerm = entry.getPermission();
FsAction effectivePerm = entryPerm.and(maskPerm);
if (entryPerm != effectivePerm) {
out.println(String.format("%s\t#effective:%s", entry,
effectivePerm.SYMBOL));
} else {
out.println(entry);
}
} else {
out.println(entry);
}
}
/**
* Prints a minimal ACL, consisting of exactly 3 ACL entries implied by the
* permission bits.
*
* @param perm FsPermission of file
*/
private void printMinimalAcl(FsPermission perm) {
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build());
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build());
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
}
}
/**
* Implements the '-setfacl' command for the FsShell.
*/
public static class SetfaclCommand extends FsCommand {
public static String NAME = SET_FACL;
public static String USAGE = "[-R] [{-b|-k} {-m|-x <acl_spec>} <path>]"
+ "|[--set <acl_spec> <path>]";
public static String DESCRIPTION = "Sets Access Control Lists (ACLs)"
+ " of files and directories.\n"
+ "Options:\n"
+ "-b :Remove all but the base ACL entries. The entries for user,"
+ " group and others are retained for compatibility with permission "
+ "bits.\n"
+ "-k :Remove the default ACL.\n"
+ "-R :Apply operations to all files and directories recursively.\n"
+ "-m :Modify ACL. New entries are added to the ACL, and existing"
+ " entries are retained.\n"
+ "-x :Remove specified ACL entries. Other ACL entries are retained.\n"
+ "--set :Fully replace the ACL, discarding all existing entries."
+ " The <acl_spec> must include entries for user, group, and others"
+ " for compatibility with permission bits.\n"
+ "<acl_spec>: Comma separated list of ACL entries.\n"
+ "<path>: File or directory to modify.\n";
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "b", "k", "R",
"m", "x", "-set");
List<AclEntry> aclEntries = null;
List<AclEntry> accessAclEntries = null;
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
cf.parse(args);
setRecursive(cf.getOpt("R"));
// Mixing remove and modify ACL flags is not allowed
boolean bothRemoveOptions = cf.getOpt("b") && cf.getOpt("k");
boolean bothModifyOptions = cf.getOpt("m") && cf.getOpt("x");
boolean oneRemoveOption = cf.getOpt("b") || cf.getOpt("k");
boolean oneModifyOption = cf.getOpt("m") || cf.getOpt("x");
boolean setOption = cf.getOpt("-set");
if ((bothRemoveOptions || bothModifyOptions)
|| (oneRemoveOption && oneModifyOption)
|| (setOption && (oneRemoveOption || oneModifyOption))) {
throw new HadoopIllegalArgumentException(
"Specified flags contains both remove and modify flags");
}
// Only -m, -x and --set expect <acl_spec>
if (oneModifyOption || setOption) {
if (args.size() < 2) {
throw new HadoopIllegalArgumentException("<acl_spec> is missing");
}
aclEntries = AclEntry.parseAclSpec(args.removeFirst(), !cf.getOpt("x"));
}
if (args.isEmpty()) {
throw new HadoopIllegalArgumentException("<path> is missing");
}
if (args.size() > 1) {
throw new HadoopIllegalArgumentException("Too many arguments");
}
// In recursive mode, save a separate list of just the access ACL entries.
// Only directories may have a default ACL. When a recursive operation
// encounters a file under the specified path, it must pass only the
// access ACL entries.
if (isRecursive() && (oneModifyOption || setOption)) {
accessAclEntries = Lists.newArrayList();
for (AclEntry entry: aclEntries) {
if (entry.getScope() == AclEntryScope.ACCESS) {
accessAclEntries.add(entry);
}
}
}
}
@Override
protected void processPath(PathData item) throws IOException {
if (cf.getOpt("b")) {
item.fs.removeAcl(item.path);
} else if (cf.getOpt("k")) {
item.fs.removeDefaultAcl(item.path);
} else if (cf.getOpt("m")) {
List<AclEntry> entries = getAclEntries(item);
if (!entries.isEmpty()) {
item.fs.modifyAclEntries(item.path, entries);
}
} else if (cf.getOpt("x")) {
List<AclEntry> entries = getAclEntries(item);
if (!entries.isEmpty()) {
item.fs.removeAclEntries(item.path, entries);
}
} else if (cf.getOpt("-set")) {
List<AclEntry> entries = getAclEntries(item);
if (!entries.isEmpty()) {
item.fs.setAcl(item.path, entries);
}
}
}
/**
* Returns the ACL entries to use in the API call for the given path. For a
* recursive operation, returns all specified ACL entries if the item is a
* directory or just the access ACL entries if the item is a file. For a
* non-recursive operation, returns all specified ACL entries.
*
* @param item PathData path to check
* @return List<AclEntry> ACL entries to use in the API call
*/
private List<AclEntry> getAclEntries(PathData item) {
if (isRecursive()) {
return item.stat.isDirectory() ? aclEntries : accessAclEntries;
} else {
return aclEntries;
}
}
}
}
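These commands can also be driven programmatically, as the tests later in this patch do; a hedged sketch via ToolRunner (the path and ACL spec are invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class AclShellSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to: hdfs dfs -setfacl -m user:bob:rw- /tmp/acl-demo
    int rc = ToolRunner.run(conf, new FsShell(),
        new String[] { "-setfacl", "-m", "user:bob:rw-", "/tmp/acl-demo" });
    System.exit(rc);
  }
}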

View File

@ -43,6 +43,7 @@ abstract public class FsCommand extends Command {
* @param factory where to register the class
*/
public static void registerCommands(CommandFactory factory) {
factory.registerCommands(AclCommands.class);
factory.registerCommands(CopyCommands.class);
factory.registerCommands(Count.class);
factory.registerCommands(Delete.class);

View File

@ -19,15 +19,22 @@
package org.apache.hadoop.fs.shell;
import java.io.IOException;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.LinkedList;
import java.util.Set;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcNoSuchMethodException;
import com.google.common.collect.Sets;
/**
* Get a listing of all files that match the file patterns.
@ -65,6 +72,8 @@ public static void registerCommands(CommandFactory factory) {
protected boolean dirRecurse;
protected boolean humanReadable = false;
private Set<URI> aclNotSupportedFsSet = Sets.newHashSet();
protected String formatSize(long size) {
return humanReadable
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
@ -107,7 +116,7 @@ protected void processPath(PathData item) throws IOException {
FileStatus stat = item.stat;
String line = String.format(lineFormat,
(stat.isDirectory() ? "d" : "-"),
stat.getPermission(),
stat.getPermission() + (hasAcl(item) ? "+" : " "),
(stat.isFile() ? stat.getReplication() : "-"),
stat.getOwner(),
stat.getGroup(),
@ -132,7 +141,7 @@ private void adjustColumnWidths(PathData items[]) {
}
StringBuilder fmt = new StringBuilder();
fmt.append("%s%s "); // permission string
fmt.append("%s%s"); // permission string
fmt.append("%" + maxRepl + "s ");
// Do not use '%-0s' as a formatting conversion, since it will throw a
// a MissingFormatWidthException if it is used in String.format().
@ -144,6 +153,49 @@ private void adjustColumnWidths(PathData items[]) {
lineFormat = fmt.toString();
}
/**
* Calls getAclStatus to determine if the given item has an ACL. For
* compatibility, this method traps errors caused by the RPC method missing
* from the server side. This would happen if the client was connected to an
* old NameNode that didn't have the ACL APIs. This method also traps the
* case of the client-side FileSystem not implementing the ACL APIs.
* FileSystem instances that do not support ACLs are remembered. This
* prevents the client from sending multiple failing RPC calls during a
* recursive ls.
*
* @param item PathData item to check
* @return boolean true if item has an ACL
* @throws IOException if there is a failure
*/
private boolean hasAcl(PathData item) throws IOException {
FileSystem fs = item.fs;
if (aclNotSupportedFsSet.contains(fs.getUri())) {
// This FileSystem failed to run the ACL API in an earlier iteration.
return false;
}
try {
return !fs.getAclStatus(item.path).getEntries().isEmpty();
} catch (RemoteException e) {
// If this is a RpcNoSuchMethodException, then the client is connected to
// an older NameNode that doesn't support ACLs. Keep going.
IOException e2 = e.unwrapRemoteException(RpcNoSuchMethodException.class);
if (!(e2 instanceof RpcNoSuchMethodException)) {
throw e;
}
} catch (IOException e) {
// The NameNode supports ACLs, but they are not enabled. Keep going.
String message = e.getMessage();
if (message != null && !message.contains("ACLs has been disabled")) {
throw e;
}
} catch (UnsupportedOperationException e) {
// The underlying FileSystem doesn't implement ACLs. Keep going.
}
// Remember that this FileSystem cannot support ACLs.
aclNotSupportedFsSet.add(fs.getUri());
return false;
}
private int maxLength(int n, Object value) {
return Math.max(n, (value != null) ? String.valueOf(value).length() : 0);
}

View File

@ -20,6 +20,7 @@
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -36,6 +37,8 @@
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
@ -277,7 +280,39 @@ public void setTimes(final Path f, final long mtime, final long atime)
throws IOException {
super.setTimes(fullPath(f), mtime, atime);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
super.modifyAclEntries(fullPath(path), aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
super.removeAclEntries(fullPath(path), aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
super.removeDefaultAcl(fullPath(path));
}
@Override
public void removeAcl(Path path) throws IOException {
super.removeAcl(fullPath(path));
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
super.setAcl(fullPath(path), aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return super.getAclStatus(fullPath(path));
}
@Override
public Path resolvePath(final Path p) throws IOException {
return super.resolvePath(fullPath(p));

View File

@ -28,7 +28,6 @@
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
@ -45,9 +44,10 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@ -473,6 +473,52 @@ public void setTimes(final Path f, final long mtime, final long atime)
res.targetFileSystem.setTimes(res.remainingPath, mtime, atime);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path),
true);
res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path),
true);
res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec);
}
@Override
public void removeDefaultAcl(Path path)
throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeDefaultAcl(res.remainingPath);
}
@Override
public void removeAcl(Path path)
throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeAcl(res.remainingPath);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.setAcl(res.remainingPath, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
return res.targetFileSystem.getAclStatus(res.remainingPath);
}
@Override
public void setVerifyChecksum(final boolean verifyChecksum) {
List<InodeTree.MountPoint<FileSystem>> mountPoints =

View File

@ -231,6 +231,29 @@ get
Returns 0 on success and -1 on error.
getfacl
Usage: <<<hdfs dfs -getfacl [-R] <path> >>>
Displays the Access Control Lists (ACLs) of files and directories. If a
directory has a default ACL, then getfacl also displays the default ACL.
Options:
* -R: List the ACLs of all files and directories recursively.
* <path>: File or directory to list.
Examples:
* <<<hdfs dfs -getfacl /file>>>
* <<<hdfs dfs -getfacl -R /dir>>>
Exit Code:
Returns 0 on success and non-zero on error.
getmerge
Usage: <<<hdfs dfs -getmerge <src> <localdst> [addnl]>>>
@ -379,6 +402,54 @@ rmr
Returns 0 on success and -1 on error.
setfacl
Usage: <<<hdfs dfs -setfacl [-R] [{-b|-k} {-m|-x <acl_spec>} <path>]|[--set <acl_spec> <path>] >>>
Sets Access Control Lists (ACLs) of files and directories.
Options:
* -b: Remove all but the base ACL entries. The entries for user, group and
others are retained for compatibility with permission bits.
* -k: Remove the default ACL.
* -R: Apply operations to all files and directories recursively.
* -m: Modify ACL. New entries are added to the ACL, and existing entries
are retained.
* -x: Remove specified ACL entries. Other ACL entries are retained.
* --set: Fully replace the ACL, discarding all existing entries. The
<acl_spec> must include entries for user, group, and others for
compatibility with permission bits.
* <acl_spec>: Comma separated list of ACL entries.
* <path>: File or directory to modify.
Examples:
* <<<hdfs dfs -setfacl -m user:hadoop:rw- /file>>>
* <<<hdfs dfs -setfacl -x user:hadoop /file>>>
* <<<hdfs dfs -setfacl -b /file>>>
* <<<hdfs dfs -setfacl -k /dir>>>
* <<<hdfs dfs -setfacl --set user::rw-,user:hadoop:rw-,group::r--,other::r-- /file>>>
* <<<hdfs dfs -setfacl -R -m user:hadoop:r-x /dir>>>
* <<<hdfs dfs -setfacl -m default:user:hadoop:r-x /dir>>>
Exit Code:
Returns 0 on success and non-zero on error.
setrep
Usage: <<<hdfs dfs -setrep [-R] [-w] <numReplicas> <path> >>>

View File

@ -21,6 +21,8 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
@ -33,6 +35,7 @@
import java.lang.reflect.Modifier;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import static org.apache.hadoop.fs.Options.ChecksumOpt;
import static org.apache.hadoop.fs.Options.CreateOpts;
@ -165,6 +168,20 @@ public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException;
public void deleteSnapshot(Path path, String snapshotName)
throws IOException;
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException;
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException;
public void removeDefaultAcl(Path path) throws IOException;
public void removeAcl(Path path) throws IOException;
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException;
public AclStatus getAclStatus(Path path) throws IOException;
}
@Test

View File

@ -0,0 +1,210 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import static org.junit.Assert.*;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests covering basic functionality of the ACL objects.
*/
public class TestAcl {
private static AclEntry ENTRY1, ENTRY2, ENTRY3, ENTRY4, ENTRY5, ENTRY6,
ENTRY7, ENTRY8, ENTRY9, ENTRY10, ENTRY11, ENTRY12, ENTRY13;
private static AclStatus STATUS1, STATUS2, STATUS3, STATUS4;
@BeforeClass
public static void setUp() {
// named user
AclEntry.Builder aclEntryBuilder = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setName("user1")
.setPermission(FsAction.ALL);
ENTRY1 = aclEntryBuilder.build();
ENTRY2 = aclEntryBuilder.build();
// named group
ENTRY3 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setName("group2")
.setPermission(FsAction.READ_WRITE)
.build();
// default other
ENTRY4 = new AclEntry.Builder()
.setType(AclEntryType.OTHER)
.setPermission(FsAction.NONE)
.setScope(AclEntryScope.DEFAULT)
.build();
// owner
ENTRY5 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setPermission(FsAction.ALL)
.build();
// default named group
ENTRY6 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setName("group3")
.setPermission(FsAction.READ_WRITE)
.setScope(AclEntryScope.DEFAULT)
.build();
// other
ENTRY7 = new AclEntry.Builder()
.setType(AclEntryType.OTHER)
.setPermission(FsAction.NONE)
.build();
// default named user
ENTRY8 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setName("user3")
.setPermission(FsAction.ALL)
.setScope(AclEntryScope.DEFAULT)
.build();
// mask
ENTRY9 = new AclEntry.Builder()
.setType(AclEntryType.MASK)
.setPermission(FsAction.READ)
.build();
// default mask
ENTRY10 = new AclEntry.Builder()
.setType(AclEntryType.MASK)
.setPermission(FsAction.READ_EXECUTE)
.setScope(AclEntryScope.DEFAULT)
.build();
// group
ENTRY11 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setPermission(FsAction.READ)
.build();
// default group
ENTRY12 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setPermission(FsAction.READ)
.setScope(AclEntryScope.DEFAULT)
.build();
// default owner
ENTRY13 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setPermission(FsAction.ALL)
.setScope(AclEntryScope.DEFAULT)
.build();
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder()
.owner("owner1")
.group("group1")
.addEntry(ENTRY1)
.addEntry(ENTRY3)
.addEntry(ENTRY4);
STATUS1 = aclStatusBuilder.build();
STATUS2 = aclStatusBuilder.build();
STATUS3 = new AclStatus.Builder()
.owner("owner2")
.group("group2")
.stickyBit(true)
.build();
STATUS4 = new AclStatus.Builder()
.addEntry(ENTRY1)
.addEntry(ENTRY3)
.addEntry(ENTRY4)
.addEntry(ENTRY5)
.addEntry(ENTRY6)
.addEntry(ENTRY7)
.addEntry(ENTRY8)
.addEntry(ENTRY9)
.addEntry(ENTRY10)
.addEntry(ENTRY11)
.addEntry(ENTRY12)
.addEntry(ENTRY13)
.build();
}
@Test
public void testEntryEquals() {
assertNotSame(ENTRY1, ENTRY2);
assertNotSame(ENTRY1, ENTRY3);
assertNotSame(ENTRY1, ENTRY4);
assertNotSame(ENTRY2, ENTRY3);
assertNotSame(ENTRY2, ENTRY4);
assertNotSame(ENTRY3, ENTRY4);
assertEquals(ENTRY1, ENTRY1);
assertEquals(ENTRY2, ENTRY2);
assertEquals(ENTRY1, ENTRY2);
assertEquals(ENTRY2, ENTRY1);
assertFalse(ENTRY1.equals(ENTRY3));
assertFalse(ENTRY1.equals(ENTRY4));
assertFalse(ENTRY3.equals(ENTRY4));
assertFalse(ENTRY1.equals(null));
assertFalse(ENTRY1.equals(new Object()));
}
@Test
public void testEntryHashCode() {
assertEquals(ENTRY1.hashCode(), ENTRY2.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY3.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY4.hashCode());
assertFalse(ENTRY3.hashCode() == ENTRY4.hashCode());
}
@Test
public void testEntryScopeIsAccessIfUnspecified() {
assertEquals(AclEntryScope.ACCESS, ENTRY1.getScope());
assertEquals(AclEntryScope.ACCESS, ENTRY2.getScope());
assertEquals(AclEntryScope.ACCESS, ENTRY3.getScope());
assertEquals(AclEntryScope.DEFAULT, ENTRY4.getScope());
}
@Test
public void testStatusEquals() {
assertNotSame(STATUS1, STATUS2);
assertNotSame(STATUS1, STATUS3);
assertNotSame(STATUS2, STATUS3);
assertEquals(STATUS1, STATUS1);
assertEquals(STATUS2, STATUS2);
assertEquals(STATUS1, STATUS2);
assertEquals(STATUS2, STATUS1);
assertFalse(STATUS1.equals(STATUS3));
assertFalse(STATUS2.equals(STATUS3));
assertFalse(STATUS1.equals(null));
assertFalse(STATUS1.equals(new Object()));
}
@Test
public void testStatusHashCode() {
assertEquals(STATUS1.hashCode(), STATUS2.hashCode());
assertFalse(STATUS1.hashCode() == STATUS3.hashCode());
}
@Test
public void testToString() {
assertEquals("user:user1:rwx", ENTRY1.toString());
assertEquals("user:user1:rwx", ENTRY2.toString());
assertEquals("group:group2:rw-", ENTRY3.toString());
assertEquals("default:other::---", ENTRY4.toString());
assertEquals(
"owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}",
STATUS1.toString());
assertEquals(
"owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}",
STATUS2.toString());
assertEquals(
"owner: owner2, group: group2, acl: {entries: [], stickyBit: true}",
STATUS3.toString());
}
}

View File

@ -54,7 +54,7 @@ public void testFsAction() {
* the expected values back out for all combinations
*/
public void testConvertingPermissions() {
for(short s = 0; s < 01777; s++) {
for(short s = 0; s <= 01777; s++) {
assertEquals(s, new FsPermission(s).toShort());
}
@ -64,10 +64,12 @@ public void testConvertingPermissions() {
for(FsAction u : FsAction.values()) {
for(FsAction g : FsAction.values()) {
for(FsAction o : FsAction.values()) {
// Cover constructor with sticky bit.
FsPermission f = new FsPermission(u, g, o, sb);
assertEquals(s, f.toShort());
FsPermission f2 = new FsPermission(f);
assertEquals(s, f2.toShort());
s++;
}
}
@ -75,48 +77,57 @@ public void testConvertingPermissions() {
}
}
public void testStickyBitToString() {
// Check that every permission has its sticky bit represented correctly
for(boolean sb : new boolean [] { false, true }) {
for(FsAction u : FsAction.values()) {
for(FsAction g : FsAction.values()) {
for(FsAction o : FsAction.values()) {
public void testSpecialBitsToString() {
for (boolean sb : new boolean[] { false, true }) {
for (FsAction u : FsAction.values()) {
for (FsAction g : FsAction.values()) {
for (FsAction o : FsAction.values()) {
FsPermission f = new FsPermission(u, g, o, sb);
if(f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
assertEquals('t', f.toString().charAt(8));
else if(f.getStickyBit() && !f.getOtherAction().implies(EXECUTE))
assertEquals('T', f.toString().charAt(8));
else if(!f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
assertEquals('x', f.toString().charAt(8));
String fString = f.toString();
// Check that sticky bit is represented correctly.
if (f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
assertEquals('t', fString.charAt(8));
else if (f.getStickyBit() && !f.getOtherAction().implies(EXECUTE))
assertEquals('T', fString.charAt(8));
else if (!f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
assertEquals('x', fString.charAt(8));
else
assertEquals('-', f.toString().charAt(8));
assertEquals('-', fString.charAt(8));
assertEquals(9, fString.length());
}
}
}
}
}
public void testFsPermission() {
String symbolic = "-rwxrwxrwx";
StringBuilder b = new StringBuilder("-123456789");
String symbolic = "-rwxrwxrwx";
for(int i = 0; i < (1<<9); i++) {
for(int j = 1; j < 10; j++) {
b.setCharAt(j, '-');
for(int i = 0; i < (1 << 10); i++) {
StringBuilder b = new StringBuilder("----------");
String binary = String.format("%11s", Integer.toBinaryString(i));
String permBinary = binary.substring(2, binary.length());
int len = permBinary.length();
for(int j = 0; j < len; j++) {
if (permBinary.charAt(j) == '1') {
int k = 9 - (len - 1 - j);
b.setCharAt(k, symbolic.charAt(k));
}
String binary = Integer.toBinaryString(i);
int len = binary.length();
for(int j = 0; j < len; j++) {
if (binary.charAt(j) == '1') {
int k = 9 - (len - 1 - j);
b.setCharAt(k, symbolic.charAt(k));
}
}
assertEquals(i, FsPermission.valueOf(b.toString()).toShort());
}
// Check for sticky bit.
if (binary.charAt(1) == '1') {
char replacement = b.charAt(9) == 'x' ? 't' : 'T';
b.setCharAt(9, replacement);
}
assertEquals(i, FsPermission.valueOf(b.toString()).toShort());
}
}
public void testUMaskParser() throws IOException {
Configuration conf = new Configuration();

View File

@ -0,0 +1,240 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcNoSuchMethodException;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Before;
import org.junit.Test;
public class TestAclCommands {
private Configuration conf = null;
@Before
public void setup() throws IOException {
conf = new Configuration();
}
@Test
public void testGetfaclValidations() throws Exception {
assertFalse("getfacl should fail without path",
0 == runCommand(new String[] { "-getfacl" }));
assertFalse("getfacl should fail with extra argument",
0 == runCommand(new String[] { "-getfacl", "/test", "extraArg" }));
}
@Test
public void testSetfaclValidations() throws Exception {
assertFalse("setfacl should fail without path",
0 == runCommand(new String[] { "-setfacl" }));
assertFalse("setfacl should fail without aclSpec",
0 == runCommand(new String[] { "-setfacl", "-m", "/path" }));
assertFalse("setfacl should fail with conflicting options",
0 == runCommand(new String[] { "-setfacl", "-m", "/path" }));
assertFalse("setfacl should fail with extra arguments",
0 == runCommand(new String[] { "-setfacl", "/path", "extra" }));
assertFalse("setfacl should fail with extra arguments",
0 == runCommand(new String[] { "-setfacl", "--set",
"default:user::rwx", "/path", "extra" }));
assertFalse("setfacl should fail with permissions for -x",
0 == runCommand(new String[] { "-setfacl", "-x", "user:user1:rwx",
"/path" }));
assertFalse("setfacl should fail ACL spec missing",
0 == runCommand(new String[] { "-setfacl", "-m",
"", "/path" }));
}
@Test
public void testMultipleAclSpecParsing() throws Exception {
List<AclEntry> parsedList = AclEntry.parseAclSpec(
"group::rwx,user:user1:rwx,user:user2:rw-,"
+ "group:group1:rw-,default:group:group1:rw-", true);
AclEntry basicAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.ALL).build();
AclEntry user1Acl = new AclEntry.Builder().setType(AclEntryType.USER)
.setPermission(FsAction.ALL).setName("user1").build();
AclEntry user2Acl = new AclEntry.Builder().setType(AclEntryType.USER)
.setPermission(FsAction.READ_WRITE).setName("user2").build();
AclEntry group1Acl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.READ_WRITE).setName("group1").build();
AclEntry defaultAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.READ_WRITE).setName("group1")
.setScope(AclEntryScope.DEFAULT).build();
List<AclEntry> expectedList = new ArrayList<AclEntry>();
expectedList.add(basicAcl);
expectedList.add(user1Acl);
expectedList.add(user2Acl);
expectedList.add(group1Acl);
expectedList.add(defaultAcl);
assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
@Test
public void testMultipleAclSpecParsingWithoutPermissions() throws Exception {
List<AclEntry> parsedList = AclEntry.parseAclSpec(
"user::,user:user1:,group::,group:group1:,mask::,other::,"
+ "default:user:user1::,default:mask::", false);
AclEntry owner = new AclEntry.Builder().setType(AclEntryType.USER).build();
AclEntry namedUser = new AclEntry.Builder().setType(AclEntryType.USER)
.setName("user1").build();
AclEntry group = new AclEntry.Builder().setType(AclEntryType.GROUP).build();
AclEntry namedGroup = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setName("group1").build();
AclEntry mask = new AclEntry.Builder().setType(AclEntryType.MASK).build();
AclEntry other = new AclEntry.Builder().setType(AclEntryType.OTHER).build();
AclEntry defaultUser = new AclEntry.Builder()
.setScope(AclEntryScope.DEFAULT).setType(AclEntryType.USER)
.setName("user1").build();
AclEntry defaultMask = new AclEntry.Builder()
.setScope(AclEntryScope.DEFAULT).setType(AclEntryType.MASK).build();
List<AclEntry> expectedList = new ArrayList<AclEntry>();
expectedList.add(owner);
expectedList.add(namedUser);
expectedList.add(group);
expectedList.add(namedGroup);
expectedList.add(mask);
expectedList.add(other);
expectedList.add(defaultUser);
expectedList.add(defaultMask);
assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
@Test
public void testLsNoRpcForGetAclStatus() throws Exception {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
conf.setBoolean("stubfs.noRpcForGetAclStatus", true);
assertEquals("ls must succeed even if getAclStatus RPC does not exist.",
0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
@Test
public void testLsAclsUnsupported() throws Exception {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
assertEquals("ls must succeed even if FileSystem does not implement ACLs.",
0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
public static class StubFileSystem extends FileSystem {
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return null;
}
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return false;
}
public AclStatus getAclStatus(Path path) throws IOException {
if (getConf().getBoolean("stubfs.noRpcForGetAclStatus", false)) {
throw new RemoteException(RpcNoSuchMethodException.class.getName(),
"test exception");
}
return super.getAclStatus(path);
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
if (f.isRoot()) {
return new FileStatus(0, true, 0, 0, 0, f);
}
return null;
}
@Override
public URI getUri() {
return URI.create("stubfs:///");
}
@Override
public Path getWorkingDirectory() {
return null;
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE,
FsAction.READ_EXECUTE);
Path path = new Path("/foo");
FileStatus stat = new FileStatus(1000, true, 3, 1000, 0, 0, perm, "owner",
"group", path);
return new FileStatus[] { stat };
}
@Override
public boolean mkdirs(Path f, FsPermission permission)
throws IOException {
return false;
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return null;
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
return false;
}
@Override
public void setWorkingDirectory(Path dir) {
}
}
private int runCommand(String[] commands) throws Exception {
return ToolRunner.run(conf, new FsShell(), commands);
}
}
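A minimal sketch of driving the new ACL shell commands programmatically, mirroring runCommand() above; the path and ACL spec are made-up values:
Configuration conf = new Configuration();
// Merge a named-user entry into the ACL of a (hypothetical) file.
int exit = ToolRunner.run(conf, new FsShell(),
new String[] { "-setfacl", "-m", "user:user1:rwx", "/tmp/testFile" });
// Display the resulting ACL.
exit = ToolRunner.run(conf, new FsShell(),
new String[] { "-getfacl", "/tmp/testFile" });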

View File

@ -20,6 +20,8 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@ -29,6 +31,7 @@
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.viewfs.ChRootedFileSystem;
import org.junit.After;
import org.junit.Assert;
@ -354,6 +357,44 @@ public void testURIEmptyPath() throws IOException {
new ChRootedFileSystem(chrootUri, conf);
}
/**
* Tests that ChRootedFileSystem delegates calls for every ACL method to the
* underlying FileSystem with all Path arguments translated as required to
* enforce chroot.
*/
@Test
public void testAclMethodsPathTranslation() throws IOException {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem)chrootFs.getRawFileSystem())
.getRawFileSystem();
Path chrootPath = new Path("/c");
Path rawPath = new Path("/a/b/c");
List<AclEntry> entries = Collections.emptyList();
chrootFs.modifyAclEntries(chrootPath, entries);
verify(mockFs).modifyAclEntries(rawPath, entries);
chrootFs.removeAclEntries(chrootPath, entries);
verify(mockFs).removeAclEntries(rawPath, entries);
chrootFs.removeDefaultAcl(chrootPath);
verify(mockFs).removeDefaultAcl(rawPath);
chrootFs.removeAcl(chrootPath);
verify(mockFs).removeAcl(rawPath);
chrootFs.setAcl(chrootPath, entries);
verify(mockFs).setAcl(rawPath, entries);
chrootFs.getAclStatus(chrootPath);
verify(mockFs).getAclStatus(rawPath);
}
static class MockFileSystem extends FilterFileSystem {
MockFileSystem() {
super(mock(FileSystem.class));
@ -361,4 +402,4 @@ static class MockFileSystem extends FilterFileSystem {
@Override
public void initialize(URI name, Configuration conf) throws IOException {}
}
}

View File

@ -20,14 +20,19 @@
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.viewfs.TestChRootedFileSystem.MockFileSystem;
import org.junit.*;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
/**
* Verify that viewfs propagates certain methods to the underlying fs
@ -57,6 +62,15 @@ static FakeFileSystem setupFileSystem(URI uri, Class clazz)
return fs;
}
private static FileSystem setupMockFileSystem(Configuration conf, URI uri)
throws Exception {
String scheme = uri.getScheme();
conf.set("fs." + scheme + ".impl", MockFileSystem.class.getName());
FileSystem fs = FileSystem.get(uri, conf);
ConfigUtil.addLink(conf, "/mounts/" + scheme, uri);
return ((MockFileSystem)fs).getRawFileSystem();
}
@Test
public void testSanity() {
assertEquals("fs1:/", fs1.getUri().toString());
@ -69,6 +83,55 @@ public void testVerifyChecksum() throws Exception {
checkVerifyChecksum(true);
}
/**
* Tests that ViewFileSystem dispatches calls for every ACL method through the
* mount table to the correct underlying FileSystem with all Path arguments
* translated as required.
*/
@Test
public void testAclMethods() throws Exception {
Configuration conf = ViewFileSystemTestSetup.createConfig();
FileSystem mockFs1 = setupMockFileSystem(conf, new URI("mockfs1:/"));
FileSystem mockFs2 = setupMockFileSystem(conf, new URI("mockfs2:/"));
FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
Path viewFsPath1 = new Path("/mounts/mockfs1/a/b/c");
Path mockFsPath1 = new Path("/a/b/c");
Path viewFsPath2 = new Path("/mounts/mockfs2/d/e/f");
Path mockFsPath2 = new Path("/d/e/f");
List<AclEntry> entries = Collections.emptyList();
viewFs.modifyAclEntries(viewFsPath1, entries);
verify(mockFs1).modifyAclEntries(mockFsPath1, entries);
viewFs.modifyAclEntries(viewFsPath2, entries);
verify(mockFs2).modifyAclEntries(mockFsPath2, entries);
viewFs.removeAclEntries(viewFsPath1, entries);
verify(mockFs1).removeAclEntries(mockFsPath1, entries);
viewFs.removeAclEntries(viewFsPath2, entries);
verify(mockFs2).removeAclEntries(mockFsPath2, entries);
viewFs.removeDefaultAcl(viewFsPath1);
verify(mockFs1).removeDefaultAcl(mockFsPath1);
viewFs.removeDefaultAcl(viewFsPath2);
verify(mockFs2).removeDefaultAcl(mockFsPath2);
viewFs.removeAcl(viewFsPath1);
verify(mockFs1).removeAcl(mockFsPath1);
viewFs.removeAcl(viewFsPath2);
verify(mockFs2).removeAcl(mockFsPath2);
viewFs.setAcl(viewFsPath1, entries);
verify(mockFs1).setAcl(mockFsPath1, entries);
viewFs.setAcl(viewFsPath2, entries);
verify(mockFs2).setAcl(mockFsPath2, entries);
viewFs.getAclStatus(viewFsPath1);
verify(mockFs1).getAclStatus(mockFsPath1);
viewFs.getAclStatus(viewFsPath2);
verify(mockFs2).getAclStatus(mockFsPath2);
}
void checkVerifyChecksum(boolean flag) {
viewFs.setVerifyChecksum(flag);
assertEquals(flag, fs1.getVerifyChecksum());

View File

@ -477,6 +477,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<includes>
<include>ClientNamenodeProtocol.proto</include>
<include>NamenodeProtocol.proto</include>
<include>acl.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>

View File

@ -115,9 +115,12 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@ -2641,6 +2644,95 @@ public ClientContext getClientContext() {
return clientContext;
}
void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
checkOpen();
try {
namenode.modifyAclEntries(src, aclSpec);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
checkOpen();
try {
namenode.removeAclEntries(src, aclSpec);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
void removeDefaultAcl(String src) throws IOException {
checkOpen();
try {
namenode.removeDefaultAcl(src);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
void removeAcl(String src) throws IOException {
checkOpen();
try {
namenode.removeAcl(src);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
checkOpen();
try {
namenode.setAcl(src, aclSpec);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
AclStatus getAclStatus(String src) throws IOException {
checkOpen();
try {
return namenode.getAclStatus(src);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
Peer peer = null;

View File

@ -72,6 +72,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT = 3;
public static final String DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH = "dfs.client.retry.interval-ms.get-last-block-length";
public static final int DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT = 4000;
public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
"^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
// HA related configuration
public static final String DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider";
@ -184,6 +186,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
public static final String DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
public static final String DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled";
public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false;
public static final String DFS_ADMIN = "dfs.cluster.administrators";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
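A hedged illustration of the WebHDFS ACL permission pattern defined above; the spec strings are made-up examples, and java.util.regex.Pattern is assumed imported:
Pattern p = Pattern.compile(
DFSConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT);
p.matcher("user:foo:rwx").matches(); // true
p.matcher("user:foo:rwx,default:group:admins:r-x").matches(); // true
p.matcher("user:foo:rwxt").matches(); // false: permissions must be [rwx-]{3}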

View File

@ -54,6 +54,8 @@
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@ -1787,4 +1789,130 @@ public void removeCachePool(String poolName) throws IOException {
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
return dfs.listCachePools();
}
/**
* {@inheritDoc}
*/
@Override
public void modifyAclEntries(Path path, final List<AclEntry> aclSpec)
throws IOException {
Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.modifyAclEntries(getPathName(p), aclSpec);
return null;
}
@Override
public Void next(final FileSystem fs, final Path p) throws IOException {
fs.modifyAclEntries(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public void removeAclEntries(Path path, final List<AclEntry> aclSpec)
throws IOException {
Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.removeAclEntries(getPathName(p), aclSpec);
return null;
}
@Override
public Void next(final FileSystem fs, final Path p) throws IOException {
fs.removeAclEntries(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public void removeDefaultAcl(Path path) throws IOException {
final Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.removeDefaultAcl(getPathName(p));
return null;
}
@Override
public Void next(final FileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.removeDefaultAcl(p);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public void removeAcl(Path path) throws IOException {
final Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.removeAcl(getPathName(p));
return null;
}
@Override
public Void next(final FileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.removeAcl(p);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public void setAcl(Path path, final List<AclEntry> aclSpec) throws IOException {
Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.setAcl(getPathName(p), aclSpec);
return null;
}
@Override
public Void next(final FileSystem fs, final Path p) throws IOException {
fs.setAcl(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public AclStatus getAclStatus(Path path) throws IOException {
final Path absF = fixRelativePart(path);
return new FileSystemLinkResolver<AclStatus>() {
@Override
public AclStatus doCall(final Path p) throws IOException {
return dfs.getAclStatus(getPathName(p));
}
@Override
public AclStatus next(final FileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getAclStatus(p);
}
}.resolve(this, absF);
}
}
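A minimal client-side sketch of the FileSystem ACL API implemented above; the path and ACL spec are illustrative values:
FileSystem fs = FileSystem.get(new Configuration());
// Merge one named-user entry into the current ACL.
List<AclEntry> spec = AclEntry.parseAclSpec("user:bruce:r-x", true);
fs.modifyAclEntries(new Path("/data"), spec);
// Read back the full logical ACL.
AclStatus status = fs.getAclStatus(new Path("/data"));
// Strip everything except the base permission bits.
fs.removeAcl(new Path("/data"));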

View File

@ -0,0 +1,39 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Indicates a failure manipulating an ACL.
*/
@InterfaceAudience.Private
public class AclException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Creates a new AclException.
*
* @param message String message
*/
public AclException(String message) {
super(message);
}
}

View File

@ -20,6 +20,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -34,6 +35,8 @@
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@ -1184,4 +1187,49 @@ public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
@Idempotent
public BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
throws IOException;
/**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*/
@Idempotent
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException;
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*/
@Idempotent
public void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException;
/**
* Removes all default ACL entries from files and directories.
*/
@Idempotent
public void removeDefaultAcl(String src) throws IOException;
/**
* Removes all but the base ACL entries of files and directories. The entries
* for user, group, and others are retained for compatibility with permission
* bits.
*/
@Idempotent
public void removeAcl(String src) throws IOException;
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*/
@Idempotent
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException;
/**
* Gets the ACLs of files and directories.
*/
@Idempotent
public AclStatus getAclStatus(String src) throws IOException;
}

View File

@ -113,7 +113,11 @@ public static enum Feature {
+ " Use distinct StorageUuid per storage directory."),
ADD_LAYOUT_FLAGS(-50, "Add support for layout flags."),
CACHING(-51, "Support for cache pools and path-based caching"),
// Hadoop 2.4.0
PROTOBUF_FORMAT(-52, "Use protobuf to serialize FSImage"),
EXTENDED_ACL(-53, "Extended ACL"),
RESERVED_REL2_4_0(-54, -51, "Reserved for release 2.4.0", true,
PROTOBUF_FORMAT, EXTENDED_ACL);
final int lv;
final int ancestorLV;

View File

@ -37,6 +37,18 @@
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@ -270,6 +282,24 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
VOID_SETBALANCERBANDWIDTH_RESPONSE =
SetBalancerBandwidthResponseProto.newBuilder().build();
private static final SetAclResponseProto
VOID_SETACL_RESPONSE = SetAclResponseProto.getDefaultInstance();
private static final ModifyAclEntriesResponseProto
VOID_MODIFYACLENTRIES_RESPONSE = ModifyAclEntriesResponseProto
.getDefaultInstance();
private static final RemoveAclEntriesResponseProto
VOID_REMOVEACLENTRIES_RESPONSE = RemoveAclEntriesResponseProto
.getDefaultInstance();
private static final RemoveDefaultAclResponseProto
VOID_REMOVEDEFAULTACL_RESPONSE = RemoveDefaultAclResponseProto
.getDefaultInstance();
private static final RemoveAclResponseProto
VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance();
/**
* Constructor
*
@ -1143,4 +1173,73 @@ public ListCachePoolsResponseProto listCachePools(RpcController controller,
throw new ServiceException(e);
}
}
@Override
public ModifyAclEntriesResponseProto modifyAclEntries(
RpcController controller, ModifyAclEntriesRequestProto req)
throws ServiceException {
try {
server.modifyAclEntries(req.getSrc(), PBHelper.convertAclEntry(req.getAclSpecList()));
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_MODIFYACLENTRIES_RESPONSE;
}
@Override
public RemoveAclEntriesResponseProto removeAclEntries(
RpcController controller, RemoveAclEntriesRequestProto req)
throws ServiceException {
try {
server.removeAclEntries(req.getSrc(),
PBHelper.convertAclEntry(req.getAclSpecList()));
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REMOVEACLENTRIES_RESPONSE;
}
@Override
public RemoveDefaultAclResponseProto removeDefaultAcl(
RpcController controller, RemoveDefaultAclRequestProto req)
throws ServiceException {
try {
server.removeDefaultAcl(req.getSrc());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REMOVEDEFAULTACL_RESPONSE;
}
@Override
public RemoveAclResponseProto removeAcl(RpcController controller,
RemoveAclRequestProto req) throws ServiceException {
try {
server.removeAcl(req.getSrc());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REMOVEACL_RESPONSE;
}
@Override
public SetAclResponseProto setAcl(RpcController controller,
SetAclRequestProto req) throws ServiceException {
try {
server.setAcl(req.getSrc(), PBHelper.convertAclEntry(req.getAclSpecList()));
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_SETACL_RESPONSE;
}
@Override
public GetAclStatusResponseProto getAclStatus(RpcController controller,
GetAclStatusRequestProto req) throws ServiceException {
try {
return PBHelper.convert(server.getAclStatus(req.getSrc()));
} catch (IOException e) {
throw new ServiceException(e);
}
}
}

View File

@ -22,6 +22,7 @@
import java.io.IOException;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -34,6 +35,8 @@
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@ -55,6 +58,12 @@
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
@ -1164,4 +1173,76 @@ public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
ModifyAclEntriesRequestProto req = ModifyAclEntriesRequestProto
.newBuilder().setSrc(src)
.addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)).build();
try {
rpcProxy.modifyAclEntries(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
RemoveAclEntriesRequestProto req = RemoveAclEntriesRequestProto
.newBuilder().setSrc(src)
.addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)).build();
try {
rpcProxy.removeAclEntries(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void removeDefaultAcl(String src) throws IOException {
RemoveDefaultAclRequestProto req = RemoveDefaultAclRequestProto
.newBuilder().setSrc(src).build();
try {
rpcProxy.removeDefaultAcl(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void removeAcl(String src) throws IOException {
RemoveAclRequestProto req = RemoveAclRequestProto.newBuilder()
.setSrc(src).build();
try {
rpcProxy.removeAcl(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
SetAclRequestProto req = SetAclRequestProto.newBuilder()
.setSrc(src)
.addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec))
.build();
try {
rpcProxy.setAcl(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public AclStatus getAclStatus(String src) throws IOException {
GetAclStatusRequestProto req = GetAclStatusRequestProto.newBuilder()
.setSrc(src).build();
try {
return PBHelper.convert(rpcProxy.getAclStatus(null, req));
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}

View File

@ -33,6 +33,11 @@
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil;
@ -61,6 +66,12 @@
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
@ -190,6 +201,13 @@ public class PBHelper {
RegisterCommandProto.newBuilder().build();
private static final RegisterCommand REG_CMD = new RegisterCommand();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES =
AclEntryScope.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES =
AclEntryType.values();
private static final FsAction[] FSACTION_VALUES =
FsAction.values();
private PBHelper() {
/** Hidden constructor */
}
@ -198,6 +216,10 @@ public static ByteString getByteString(byte[] bytes) {
return ByteString.copyFrom(bytes);
}
private static <T extends Enum<T>, U extends Enum<U>> U castEnum(T from, U[] to) {
return to[from.ordinal()];
}
public static NamenodeRole convert(NamenodeRoleProto role) {
switch (role) {
case NAMENODE:
@ -750,8 +772,9 @@ public static DatanodeCommand convert(DatanodeCommandProto proto) {
return REG_CMD;
case BlockIdCommand:
return PBHelper.convert(proto.getBlkIdCmd());
default:
return null;
}
}
public static BalancerBandwidthCommandProto convert(
@ -1887,5 +1910,74 @@ public static InputStream vintPrefixed(final InputStream input)
assert size >= 0;
return new ExactSizeInputStream(input, size);
}
private static AclEntryScopeProto convert(AclEntryScope v) {
return AclEntryScopeProto.valueOf(v.ordinal());
}
private static AclEntryScope convert(AclEntryScopeProto v) {
return castEnum(v, ACL_ENTRY_SCOPE_VALUES);
}
private static AclEntryTypeProto convert(AclEntryType e) {
return AclEntryTypeProto.valueOf(e.ordinal());
}
private static AclEntryType convert(AclEntryTypeProto v) {
return castEnum(v, ACL_ENTRY_TYPE_VALUES);
}
private static FsActionProto convert(FsAction v) {
return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
}
private static FsAction convert(FsActionProto v) {
return castEnum(v, FSACTION_VALUES);
}
public static List<AclEntryProto> convertAclEntryProto(
List<AclEntry> aclSpec) {
ArrayList<AclEntryProto> r = Lists.newArrayListWithCapacity(aclSpec.size());
for (AclEntry e : aclSpec) {
AclEntryProto.Builder builder = AclEntryProto.newBuilder();
builder.setType(convert(e.getType()));
builder.setScope(convert(e.getScope()));
builder.setPermissions(convert(e.getPermission()));
if (e.getName() != null) {
builder.setName(e.getName());
}
r.add(builder.build());
}
return r;
}
public static List<AclEntry> convertAclEntry(List<AclEntryProto> aclSpec) {
ArrayList<AclEntry> r = Lists.newArrayListWithCapacity(aclSpec.size());
for (AclEntryProto e : aclSpec) {
AclEntry.Builder builder = new AclEntry.Builder();
builder.setType(convert(e.getType()));
builder.setScope(convert(e.getScope()));
builder.setPermission(convert(e.getPermissions()));
if (e.hasName()) {
builder.setName(e.getName());
}
r.add(builder.build());
}
return r;
}
public static AclStatus convert(GetAclStatusResponseProto e) {
AclStatusProto r = e.getResult();
return new AclStatus.Builder().owner(r.getOwner()).group(r.getGroup())
.stickyBit(r.getSticky())
.addEntries(convertAclEntry(r.getEntriesList())).build();
}
public static GetAclStatusResponseProto convert(AclStatus e) {
AclStatusProto r = AclStatusProto.newBuilder().setOwner(e.getOwner())
.setGroup(e.getGroup()).setSticky(e.isStickyBit())
.addAllEntries(convertAclEntryProto(e.getEntries())).build();
return GetAclStatusResponseProto.newBuilder().setResult(r).build();
}
}
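A small sketch showing that ACL entries round-trip through the protobuf conversions above; the entry values are made up:
List<AclEntry> entries = Lists.newArrayList(
new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setName("bruce")
.setPermission(FsAction.READ_EXECUTE).build());
List<AclEntryProto> protos = PBHelper.convertAclEntryProto(entries);
// AclEntry implements equals, so a round trip preserves the list.
assert entries.equals(PBHelper.convertAclEntry(protos));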

View File

@ -0,0 +1,56 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.AclException;
/**
* Support for ACLs is controlled by a configuration flag. If the configuration
* flag is false, then the NameNode will reject all ACL-related operations.
*/
final class AclConfigFlag {
private final boolean enabled;
/**
* Creates a new AclConfigFlag from configuration.
*
* @param conf Configuration to check
*/
public AclConfigFlag(Configuration conf) {
enabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,
DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT);
LogFactory.getLog(AclConfigFlag.class).info("ACLs enabled? " + enabled);
}
/**
* Checks the flag on behalf of an ACL API call.
*
* @throws AclException if ACLs are disabled
*/
public void checkForApiCall() throws AclException {
if (!enabled) {
throw new AclException(String.format(
"The ACL operation has been rejected. "
+ "Support for ACLs has been disabled by setting %s to false.",
DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY));
}
}
}
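ACLs are disabled by default, so a deployment must opt in before the ACL RPCs succeed. A minimal sketch, equivalent to setting dfs.namenode.acls.enabled=true in hdfs-site.xml:
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
new AclConfigFlag(conf).checkForApiCall(); // no AclException thrown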

View File

@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import com.google.common.collect.ImmutableList;
/**
* Feature that represents the ACLs of the inode.
*/
@InterfaceAudience.Private
public class AclFeature implements INode.Feature {
public static final ImmutableList<AclEntry> EMPTY_ENTRY_LIST =
ImmutableList.of();
private final ImmutableList<AclEntry> entries;
public AclFeature(ImmutableList<AclEntry> entries) {
this.entries = entries;
}
public ImmutableList<AclEntry> getEntries() {
return entries;
}
}

View File

@ -0,0 +1,406 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Collections;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
/**
* AclStorage contains utility methods that define how ACL data is stored in the
* namespace.
*
* If an inode has an ACL, then the ACL bit is set in the inode's
* {@link FsPermission} and the inode also contains an {@link AclFeature}. For
* the access ACL, the owner and other entries are identical to the owner and
* other bits stored in FsPermission, so we reuse those. The access mask entry
* is stored into the group permission bits of FsPermission. This is consistent
* with other file systems' implementations of ACLs and eliminates the need for
* special handling in various parts of the codebase. For example, if a user
* calls chmod to change group permission bits on a file with an ACL, then the
* expected behavior is to change the ACL's mask entry. By saving the mask entry
* into the group permission bits, chmod continues to work correctly without
* special handling. All remaining access entries (named users and named groups)
* are stored as explicit {@link AclEntry} instances in a list inside the
* AclFeature. Additionally, all default entries are stored in the AclFeature.
*
* The methods in this class encapsulate these rules for reading or writing the
* ACL entries to the appropriate location.
*
* The methods in this class assume that input ACL entry lists have already been
* validated and sorted according to the rules enforced by
* {@link AclTransformation}.
*/
@InterfaceAudience.Private
final class AclStorage {
/**
* If a default ACL is defined on a parent directory, then copies that default
* ACL to a newly created child file or directory.
*
* @param child INode newly created child
*/
public static void copyINodeDefaultAcl(INode child) {
INodeDirectory parent = child.getParent();
AclFeature parentAclFeature = parent.getAclFeature();
if (parentAclFeature == null || !(child.isFile() || child.isDirectory())) {
return;
}
// Split parent's entries into access vs. default.
List<AclEntry> featureEntries = parent.getAclFeature().getEntries();
ScopedAclEntries scopedEntries = new ScopedAclEntries(featureEntries);
List<AclEntry> parentDefaultEntries = scopedEntries.getDefaultEntries();
// The parent may have an access ACL but no default ACL. If so, exit.
if (parentDefaultEntries.isEmpty()) {
return;
}
// Pre-allocate list size for access entries to copy from parent.
List<AclEntry> accessEntries = Lists.newArrayListWithCapacity(
parentDefaultEntries.size());
FsPermission childPerm = child.getFsPermission();
// Copy each default ACL entry from parent to new child's access ACL.
boolean parentDefaultIsMinimal = isMinimalAcl(parentDefaultEntries);
for (AclEntry entry: parentDefaultEntries) {
AclEntryType type = entry.getType();
String name = entry.getName();
AclEntry.Builder builder = new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(type)
.setName(name);
// The child's initial permission bits are treated as the mode parameter,
// which can filter copied permission values for owner, mask and other.
final FsAction permission;
if (type == AclEntryType.USER && name == null) {
permission = entry.getPermission().and(childPerm.getUserAction());
} else if (type == AclEntryType.GROUP && parentDefaultIsMinimal) {
// This only happens if the default ACL is a minimal ACL: exactly 3
// entries corresponding to owner, group and other. In this case,
// filter the group permissions.
permission = entry.getPermission().and(childPerm.getGroupAction());
} else if (type == AclEntryType.MASK) {
// Group bits from mode parameter filter permission of mask entry.
permission = entry.getPermission().and(childPerm.getGroupAction());
} else if (type == AclEntryType.OTHER) {
permission = entry.getPermission().and(childPerm.getOtherAction());
} else {
permission = entry.getPermission();
}
builder.setPermission(permission);
accessEntries.add(builder.build());
}
// A new directory also receives a copy of the parent's default ACL.
List<AclEntry> defaultEntries = child.isDirectory() ? parentDefaultEntries :
Collections.<AclEntry>emptyList();
final FsPermission newPerm;
if (!isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) {
// Save the new ACL to the child.
child.addAclFeature(createAclFeature(accessEntries, defaultEntries));
newPerm = createFsPermissionForExtendedAcl(accessEntries, childPerm);
} else {
// The child is receiving a minimal ACL.
newPerm = createFsPermissionForMinimalAcl(accessEntries, childPerm);
}
child.setPermission(newPerm);
}
/**
* Reads the existing extended ACL entries of an inode. This method returns
* only the extended ACL entries stored in the AclFeature. If the inode does
* not have an ACL, then this method returns an empty list. This method
* supports querying by snapshot ID.
*
* @param inode INode to read
* @param snapshotId int ID of snapshot to read
* @return List<AclEntry> containing extended inode ACL entries
*/
public static List<AclEntry> readINodeAcl(INode inode, int snapshotId) {
AclFeature f = inode.getAclFeature(snapshotId);
return f == null ? ImmutableList.<AclEntry> of() : f.getEntries();
}
/**
* Reads the existing ACL of an inode. This method always returns the full
* logical ACL of the inode after reading relevant data from the inode's
* {@link FsPermission} and {@link AclFeature}. Note that every inode
* logically has an ACL, even if no ACL has been set explicitly. If the inode
* does not have an extended ACL, then the result is a minimal ACL consisting of
* exactly 3 entries that correspond to the owner, group and other permissions.
* This method always reads the inode's current state and does not support
* querying by snapshot ID. This is because the method is intended to support
* ACL modification APIs, which always apply a delta on top of current state.
*
* @param inode INode to read
* @return List<AclEntry> containing all logical inode ACL entries
*/
public static List<AclEntry> readINodeLogicalAcl(INode inode) {
FsPermission perm = inode.getFsPermission();
AclFeature f = inode.getAclFeature();
if (f == null) {
return getMinimalAcl(perm);
}
final List<AclEntry> existingAcl;
// Split ACL entries stored in the feature into access vs. default.
List<AclEntry> featureEntries = f.getEntries();
ScopedAclEntries scoped = new ScopedAclEntries(featureEntries);
List<AclEntry> accessEntries = scoped.getAccessEntries();
List<AclEntry> defaultEntries = scoped.getDefaultEntries();
// Pre-allocate list size for the explicit entries stored in the feature
// plus the 3 implicit entries (owner, group and other) from the permission
// bits.
existingAcl = Lists.newArrayListWithCapacity(featureEntries.size() + 3);
if (!accessEntries.isEmpty()) {
// Add owner entry implied from user permission bits.
existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setPermission(perm.getUserAction())
.build());
// Next add all named user and group entries taken from the feature.
existingAcl.addAll(accessEntries);
// Add mask entry implied from group permission bits.
existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.MASK).setPermission(perm.getGroupAction())
.build());
// Add other entry implied from other permission bits.
existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER).setPermission(perm.getOtherAction())
.build());
} else {
// It's possible that there is a default ACL but no access ACL. In this
// case, add the minimal access ACL implied by the permission bits.
existingAcl.addAll(getMinimalAcl(perm));
}
// Add all default entries after the access entries.
existingAcl.addAll(defaultEntries);
// The above adds entries in the correct order, so no need to sort here.
return existingAcl;
}
/**
* Completely removes the ACL from an inode.
*
* @param inode INode to update
* @param snapshotId int latest snapshot ID of inode
* @throws QuotaExceededException if quota limit is exceeded
*/
public static void removeINodeAcl(INode inode, int snapshotId)
throws QuotaExceededException {
AclFeature f = inode.getAclFeature();
if (f == null) {
return;
}
FsPermission perm = inode.getFsPermission();
List<AclEntry> featureEntries = f.getEntries();
if (featureEntries.get(0).getScope() == AclEntryScope.ACCESS) {
// Restore group permissions from the feature's entry to permission
// bits, overwriting the mask, which is not part of a minimal ACL.
AclEntry groupEntryKey = new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS).setType(AclEntryType.GROUP).build();
int groupEntryIndex = Collections.binarySearch(featureEntries,
groupEntryKey, AclTransformation.ACL_ENTRY_COMPARATOR);
assert groupEntryIndex >= 0;
FsAction groupPerm = featureEntries.get(groupEntryIndex).getPermission();
FsPermission newPerm = new FsPermission(perm.getUserAction(), groupPerm,
perm.getOtherAction(), perm.getStickyBit());
inode.setPermission(newPerm, snapshotId);
}
inode.removeAclFeature(snapshotId);
}
/**
* Updates an inode with a new ACL. This method takes a full logical ACL and
* stores the entries to the inode's {@link FsPermission} and
* {@link AclFeature}.
*
* @param inode INode to update
* @param newAcl List<AclEntry> containing new ACL entries
* @param snapshotId int latest snapshot ID of inode
* @throws AclException if the ACL is invalid for the given inode
* @throws QuotaExceededException if quota limit is exceeded
*/
public static void updateINodeAcl(INode inode, List<AclEntry> newAcl,
int snapshotId) throws AclException, QuotaExceededException {
assert newAcl.size() >= 3;
FsPermission perm = inode.getFsPermission();
final FsPermission newPerm;
if (!isMinimalAcl(newAcl)) {
// This is an extended ACL. Split entries into access vs. default.
ScopedAclEntries scoped = new ScopedAclEntries(newAcl);
List<AclEntry> accessEntries = scoped.getAccessEntries();
List<AclEntry> defaultEntries = scoped.getDefaultEntries();
// Only directories may have a default ACL.
if (!defaultEntries.isEmpty() && !inode.isDirectory()) {
throw new AclException(
"Invalid ACL: only directories may have a default ACL.");
}
// Attach entries to the feature.
if (inode.getAclFeature() != null) {
inode.removeAclFeature(snapshotId);
}
inode.addAclFeature(createAclFeature(accessEntries, defaultEntries),
snapshotId);
newPerm = createFsPermissionForExtendedAcl(accessEntries, perm);
} else {
// This is a minimal ACL. Remove the ACL feature if it previously had one.
if (inode.getAclFeature() != null) {
inode.removeAclFeature(snapshotId);
}
newPerm = createFsPermissionForMinimalAcl(newAcl, perm);
}
inode.setPermission(newPerm, snapshotId);
}
/**
* There is no reason to instantiate this class.
*/
private AclStorage() {
}
/**
* Creates an AclFeature from the given ACL entries.
*
* @param accessEntries List<AclEntry> access ACL entries
* @param defaultEntries List<AclEntry> default ACL entries
* @return AclFeature containing the required ACL entries
*/
private static AclFeature createAclFeature(List<AclEntry> accessEntries,
List<AclEntry> defaultEntries) {
// Pre-allocate list size for the explicit entries stored in the feature,
// which is all entries minus the 3 entries implicitly stored in the
// permission bits.
List<AclEntry> featureEntries = Lists.newArrayListWithCapacity(
(accessEntries.size() - 3) + defaultEntries.size());
// For the access ACL, the feature only needs to hold the named user and
// group entries. For a correctly sorted ACL, these will be in a
// predictable range.
if (!isMinimalAcl(accessEntries)) {
featureEntries.addAll(
accessEntries.subList(1, accessEntries.size() - 2));
}
// Add all default entries to the feature.
featureEntries.addAll(defaultEntries);
return new AclFeature(ImmutableList.copyOf(featureEntries));
}
/**
* Creates the new FsPermission for an inode that is receiving an extended
* ACL, based on its access ACL entries. For a correctly sorted ACL, the
* first entry is the owner and the last 2 entries are the mask and other
* entries respectively. Also preserve sticky bit and toggle ACL bit on.
*
* @param accessEntries List<AclEntry> access ACL entries
* @param existingPerm FsPermission existing permissions
* @return FsPermission new permissions
*/
private static FsPermission createFsPermissionForExtendedAcl(
List<AclEntry> accessEntries, FsPermission existingPerm) {
return new FsPermission(accessEntries.get(0).getPermission(),
accessEntries.get(accessEntries.size() - 2).getPermission(),
accessEntries.get(accessEntries.size() - 1).getPermission(),
existingPerm.getStickyBit());
}
/**
* Creates the new FsPermission for an inode that is receiving a minimal ACL,
* based on its access ACL entries. For a correctly sorted ACL, the owner,
* group and other permissions are in order. Also preserve sticky bit and
* toggle ACL bit off.
*
* @param accessEntries List<AclEntry> access ACL entries
* @param existingPerm FsPermission existing permissions
* @return FsPermission new permissions
*/
private static FsPermission createFsPermissionForMinimalAcl(
List<AclEntry> accessEntries, FsPermission existingPerm) {
return new FsPermission(accessEntries.get(0).getPermission(),
accessEntries.get(1).getPermission(),
accessEntries.get(2).getPermission(),
existingPerm.getStickyBit());
}
/**
* Translates the given permission bits to the equivalent minimal ACL.
*
* @param perm FsPermission to translate
* @return List<AclEntry> containing exactly 3 entries representing the owner,
* group and other permissions
*/
private static List<AclEntry> getMinimalAcl(FsPermission perm) {
return Lists.newArrayList(
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build(),
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build(),
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
}
/**
* Checks if the given entries represent a minimal ACL (contains exactly 3
* entries).
*
* @param entries List<AclEntry> entries to check
* @return boolean true if the entries represent a minimal ACL
*/
private static boolean isMinimalAcl(List<AclEntry> entries) {
return entries.size() == 3;
}
}
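A worked sketch of the storage rule documented above, with made-up values. For the access ACL user::rwx, user:bruce:r--, group::r--, mask::r--, other::---, the permission bits become rwxr----- (the mask occupies the group slot), and only the named-user and unnamed-group entries are stored in the AclFeature:
FsPermission perm = new FsPermission(
FsAction.ALL, // user::rwx -> owner bits
FsAction.READ, // mask::r-- -> group slot of the permission bits
FsAction.NONE); // other::--- -> other bits
AclFeature f = new AclFeature(ImmutableList.of(
new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setName("bruce")
.setPermission(FsAction.READ).build(),
new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.GROUP)
.setPermission(FsAction.READ).build()));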

View File

@ -0,0 +1,485 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import com.google.common.base.Objects;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AclException;
/**
* AclTransformation defines the operations that can modify an ACL. All ACL
* modifications take as input an existing ACL and apply logic to add new
* entries, modify existing entries or remove old entries. Some operations also
* accept an ACL spec: a list of entries that further describes the requested
* change. Different operations interpret the ACL spec differently. In the
* case of adding an ACL to an inode that previously did not have one, the
* existing ACL can be a "minimal ACL" containing exactly 3 entries for owner,
* group and other, all derived from the {@link FsPermission} bits.
*
* The algorithms implemented here require sorted lists of ACL entries. For any
* existing ACL, it is assumed that the entries are sorted. This is because all
* ACL creation and modification is intended to go through these methods, and
* they all guarantee correct sort order in their outputs. However, an ACL spec
* is considered untrusted user input, so all operations pre-sort the ACL spec as
* the first step.
*/
@InterfaceAudience.Private
final class AclTransformation {
private static final int MAX_ENTRIES = 32;
/**
* Filters (discards) any existing ACL entries that have the same scope, type
* and name as any entry in the ACL spec. If necessary, recalculates the mask
* entries. If necessary, default entries may be inferred by copying the
* permissions of the corresponding access entries. It is invalid to request
* removal of the mask entry from an ACL that would otherwise require a mask
* entry, due to existing named entries or an unnamed group entry.
*
* @param existingAcl List<AclEntry> existing ACL
* @param inAclSpec List<AclEntry> ACL spec describing entries to filter
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> filterAclEntriesByAclSpec(
List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry existingEntry: existingAcl) {
if (aclSpec.containsKey(existingEntry)) {
scopeDirty.add(existingEntry.getScope());
if (existingEntry.getType() == MASK) {
maskDirty.add(existingEntry.getScope());
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
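A hedged usage sketch (editor's addition; a hypothetical test class in this package, since AclTransformation is package-private): removing the only named entry forces the mask to be recalculated from the remaining group-class entries.
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import com.google.common.collect.Lists;
public class FilterAclDemo {
  public static void main(String[] args) throws Exception {
    List<AclEntry> existing = Lists.newArrayList(
        e(USER, null, FsAction.ALL),            // user::rwx
        e(USER, "bruce", FsAction.READ_WRITE),  // user:bruce:rw-
        e(GROUP, null, FsAction.READ_EXECUTE),  // group::r-x
        e(MASK, null, FsAction.ALL),            // mask::rwx
        e(OTHER, null, FsAction.NONE));         // other::---
    // A removal spec matches on scope, type and name; permission is ignored.
    List<AclEntry> spec = Lists.newArrayList(new AclEntry.Builder()
        .setScope(ACCESS).setType(USER).setName("bruce").build());
    // With bruce gone no named entries remain, but the ACL previously had a
    // mask, so a new one is recalculated from group::r-x, yielding mask::r-x.
    System.out.println(AclTransformation.filterAclEntriesByAclSpec(existing, spec));
  }
  private static AclEntry e(AclEntryType type, String name, FsAction perm) {
    return new AclEntry.Builder().setScope(ACCESS).setType(type)
        .setName(name).setPermission(perm).build();
  }
}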
/**
* Filters (discards) any existing default ACL entries. The new ACL retains
* only the access ACL entries.
*
* @param existingAcl List<AclEntry> existing ACL
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> filterDefaultAclEntries(
List<AclEntry> existingAcl) throws AclException {
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
for (AclEntry existingEntry: existingAcl) {
if (existingEntry.getScope() == DEFAULT) {
// Default entries sort after access entries, so we can exit early.
break;
}
aclBuilder.add(existingEntry);
}
return buildAndValidateAcl(aclBuilder);
}
/**
* Merges the entries of the ACL spec into the existing ACL. If necessary,
* recalculates the mask entries. If necessary, default entries may be
* inferred by copying the permissions of the corresponding access entries.
*
* @param existingAcl List<AclEntry> existing ACL
* @param inAclSpec List<AclEntry> ACL spec containing entries to merge
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
List<AclEntry> foundAclSpecEntries =
Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry existingEntry: existingAcl) {
AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
if (aclSpecEntry != null) {
foundAclSpecEntries.add(aclSpecEntry);
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
// ACL spec entries that were not replacements are new additions.
for (AclEntry newEntry: aclSpec) {
if (Collections.binarySearch(foundAclSpecEntries, newEntry,
ACL_ENTRY_COMPARATOR) < 0) {
scopeDirty.add(newEntry.getScope());
if (newEntry.getType() == MASK) {
providedMask.put(newEntry.getScope(), newEntry);
maskDirty.add(newEntry.getScope());
} else {
aclBuilder.add(newEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
/**
* Completely replaces the ACL with the entries of the ACL spec. If
* necessary, recalculates the mask entries. If necessary, default entries
* are inferred by copying the permissions of the corresponding access
* entries. Replacement occurs separately for each of the access ACL and the
* default ACL. If the ACL spec contains only access entries, then the
* existing default entries are retained. If the ACL spec contains only
* default entries, then the existing access entries are retained. If the ACL
* spec contains both access and default entries, then both are replaced.
*
* @param existingAcl List<AclEntry> existing ACL
* @param inAclSpec List<AclEntry> ACL spec containing replacement entries
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
// Replacement is done separately for each scope: access and default.
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry aclSpecEntry: aclSpec) {
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
}
// Copy existing entries if the scope was not replaced.
for (AclEntry existingEntry: existingAcl) {
if (!scopeDirty.contains(existingEntry.getScope())) {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
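Another hedged sketch (editor's addition; hypothetical class in this package) of the scope separation described above: a spec containing only access entries replaces the access ACL and copies the existing default entries through unchanged.
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import com.google.common.collect.Lists;
public class ReplaceAclDemo {
  public static void main(String[] args) throws Exception {
    List<AclEntry> existing = Lists.newArrayList(
        e(ACCESS, USER, FsAction.ALL), e(ACCESS, GROUP, FsAction.READ_EXECUTE),
        e(ACCESS, OTHER, FsAction.NONE),
        e(DEFAULT, USER, FsAction.ALL), e(DEFAULT, GROUP, FsAction.READ),
        e(DEFAULT, OTHER, FsAction.NONE));
    // The spec touches only the access scope, so DEFAULT is never marked dirty...
    List<AclEntry> spec = Lists.newArrayList(
        e(ACCESS, USER, FsAction.ALL), e(ACCESS, GROUP, FsAction.READ),
        e(ACCESS, OTHER, FsAction.NONE));
    // ...and the three existing default entries survive the replacement.
    System.out.println(AclTransformation.replaceAclEntries(existing, spec));
  }
  private static AclEntry e(AclEntryScope s, AclEntryType t, FsAction p) {
    return new AclEntry.Builder().setScope(s).setType(t).setPermission(p).build();
  }
}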
/**
* There is no reason to instantiate this class.
*/
private AclTransformation() {
}
/**
* Comparator that enforces required ordering for entries within an ACL:
* - owner entry (unnamed user)
* - all named user entries (internal ordering undefined)
* - owning group entry (unnamed group)
* - all named group entries (internal ordering undefined)
* - mask entry
* - other entry
* All access ACL entries sort ahead of all default ACL entries.
*/
static final Comparator<AclEntry> ACL_ENTRY_COMPARATOR =
new Comparator<AclEntry>() {
@Override
public int compare(AclEntry entry1, AclEntry entry2) {
return ComparisonChain.start()
.compare(entry1.getScope(), entry2.getScope(),
Ordering.explicit(ACCESS, DEFAULT))
.compare(entry1.getType(), entry2.getType(),
Ordering.explicit(USER, GROUP, MASK, OTHER))
.compare(entry1.getName(), entry2.getName(),
Ordering.natural().nullsFirst())
.result();
}
};
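A hedged spot check (editor's addition, usable from a test in this package; the group name is hypothetical): the nullsFirst ordering is what puts the unnamed owning-group entry ahead of named group entries.
AclEntry owningGroup = new AclEntry.Builder().setScope(ACCESS)
    .setType(GROUP).setPermission(FsAction.READ_EXECUTE).build();  // group::r-x
AclEntry namedGroup = new AclEntry.Builder().setScope(ACCESS)
    .setType(GROUP).setName("staff").setPermission(FsAction.READ_WRITE).build();
// Null names sort first, so the owning group entry precedes group:staff:rw-.
assert AclTransformation.ACL_ENTRY_COMPARATOR.compare(owningGroup, namedGroup) < 0;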
/**
* Builds the final list of ACL entries to return by trimming, sorting and
* validating the ACL entries that have been added.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
* @return List<AclEntry> unmodifiable, sorted list of ACL entries
* @throws AclException if validation fails
*/
private static List<AclEntry> buildAndValidateAcl(
ArrayList<AclEntry> aclBuilder) throws AclException {
if (aclBuilder.size() > MAX_ENTRIES) {
throw new AclException("Invalid ACL: ACL has " + aclBuilder.size() +
" entries, which exceeds maximum of " + MAX_ENTRIES + ".");
}
aclBuilder.trimToSize();
Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
// Full iteration to check for duplicates and invalid named entries.
AclEntry prevEntry = null;
for (AclEntry entry: aclBuilder) {
if (prevEntry != null &&
ACL_ENTRY_COMPARATOR.compare(prevEntry, entry) == 0) {
throw new AclException(
"Invalid ACL: multiple entries with same scope, type and name.");
}
if (entry.getName() != null && (entry.getType() == MASK ||
entry.getType() == OTHER)) {
throw new AclException(
"Invalid ACL: this entry type must not have a name: " + entry + ".");
}
prevEntry = entry;
}
// Search for the required base access entries. If there is a default ACL,
// then do the same check on the default entries.
ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) {
AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS)
.setType(type).build();
if (Collections.binarySearch(scopedEntries.getAccessEntries(),
accessEntryKey, ACL_ENTRY_COMPARATOR) < 0) {
throw new AclException(
"Invalid ACL: the user, group and other entries are required.");
}
if (!scopedEntries.getDefaultEntries().isEmpty()) {
AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT)
.setType(type).build();
if (Collections.binarySearch(scopedEntries.getDefaultEntries(),
defaultEntryKey, ACL_ENTRY_COMPARATOR) < 0) {
throw new AclException(
"Invalid default ACL: the user, group and other entries are required.");
}
}
}
return Collections.unmodifiableList(aclBuilder);
}
/**
* Calculates mask entries required for the ACL. Mask calculation is performed
* separately for each scope: access and default. This method is responsible
* for handling the following cases of mask calculation:
* 1. Throws an exception if the caller attempts to remove the mask entry of an
* existing ACL that requires it. If the ACL has any named entries, then a
* mask entry is required.
* 2. If the caller supplied a mask in the ACL spec, use it.
* 3. If the caller did not supply a mask, but there are ACL entry changes in
* this scope, then automatically calculate a new mask. The permissions of
* the new mask are the union of the permissions on the group entry and all
* named entries.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
* @param providedMask EnumMap<AclEntryScope, AclEntry> mapping each scope to
* the mask entry that was provided for that scope (if provided)
* @param maskDirty EnumSet<AclEntryScope> which contains a scope if the mask
* entry is dirty (added or deleted) in that scope
* @param scopeDirty EnumSet<AclEntryScope> which contains a scope if any entry
* is dirty (added or deleted) in that scope
* @throws AclException if validation fails
*/
private static void calculateMasks(List<AclEntry> aclBuilder,
EnumMap<AclEntryScope, AclEntry> providedMask,
EnumSet<AclEntryScope> maskDirty, EnumSet<AclEntryScope> scopeDirty)
throws AclException {
EnumSet<AclEntryScope> scopeFound = EnumSet.noneOf(AclEntryScope.class);
EnumMap<AclEntryScope, FsAction> unionPerms =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskNeeded = EnumSet.noneOf(AclEntryScope.class);
// Determine which scopes are present, which scopes need a mask, and the
// union of group class permissions in each scope.
for (AclEntry entry: aclBuilder) {
scopeFound.add(entry.getScope());
if (entry.getType() == GROUP || entry.getName() != null) {
FsAction scopeUnionPerms = Objects.firstNonNull(
unionPerms.get(entry.getScope()), FsAction.NONE);
unionPerms.put(entry.getScope(),
scopeUnionPerms.or(entry.getPermission()));
}
if (entry.getName() != null) {
maskNeeded.add(entry.getScope());
}
}
// Add mask entry if needed in each scope.
for (AclEntryScope scope: scopeFound) {
if (!providedMask.containsKey(scope) && maskNeeded.contains(scope) &&
maskDirty.contains(scope)) {
// Caller explicitly removed mask entry, but it's required.
throw new AclException(
"Invalid ACL: mask is required, but it was deleted.");
} else if (providedMask.containsKey(scope) &&
(!scopeDirty.contains(scope) || maskDirty.contains(scope))) {
// Caller explicitly provided new mask, or we are preserving the existing
// mask in an unchanged scope.
aclBuilder.add(providedMask.get(scope));
} else if (maskNeeded.contains(scope) || providedMask.containsKey(scope)) {
// Otherwise, if there are maskable entries present, or the ACL
// previously had a mask, then recalculate a mask automatically.
aclBuilder.add(new AclEntry.Builder()
.setScope(scope)
.setType(MASK)
.setPermission(unionPerms.get(scope))
.build());
}
}
}
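A worked instance of case 3 above (editor's addition; the entry names are hypothetical): if the dirty access scope contains group::r-x and user:diana:rw- and no mask was supplied, the automatic mask is their union, computed with FsAction.or exactly as in the loop above.
FsAction union = FsAction.NONE
    .or(FsAction.READ_EXECUTE)  // group::r-x
    .or(FsAction.READ_WRITE);   // user:diana:rw-
assert union == FsAction.ALL;   // recalculated mask::rwx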
/**
* Adds unspecified default entries by copying permissions from the
* corresponding access entries.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
*/
private static void copyDefaultsIfNeeded(List<AclEntry> aclBuilder) {
Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
if (!scopedEntries.getDefaultEntries().isEmpty()) {
List<AclEntry> accessEntries = scopedEntries.getAccessEntries();
List<AclEntry> defaultEntries = scopedEntries.getDefaultEntries();
List<AclEntry> copiedEntries = Lists.newArrayListWithCapacity(3);
for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) {
AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT)
.setType(type).build();
int defaultEntryIndex = Collections.binarySearch(defaultEntries,
defaultEntryKey, ACL_ENTRY_COMPARATOR);
if (defaultEntryIndex < 0) {
AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS)
.setType(type).build();
int accessEntryIndex = Collections.binarySearch(accessEntries,
accessEntryKey, ACL_ENTRY_COMPARATOR);
if (accessEntryIndex >= 0) {
copiedEntries.add(new AclEntry.Builder()
.setScope(DEFAULT)
.setType(type)
.setPermission(accessEntries.get(accessEntryIndex).getPermission())
.build());
}
}
}
// Add all copied entries when done to prevent potential issues with binary
// search on a modified aclBuilder during the main loop.
aclBuilder.addAll(copiedEntries);
}
}
/**
* An ACL spec that has been pre-validated and sorted.
*/
private static final class ValidatedAclSpec implements Iterable<AclEntry> {
private final List<AclEntry> aclSpec;
/**
* Creates a ValidatedAclSpec by pre-validating and sorting the given ACL
* entries. Pre-validation checks that it does not exceed the maximum number
* of entries. This check is performed before modifying the ACL, and it's
* actually insufficient for enforcing the maximum number of entries.
* Transformation logic can create additional entries automatically, such as
* the mask and some of the default entries, so we also need additional
* checks during transformation. The up-front check is still valuable here
* so that we don't run a lot of expensive transformation logic while
* holding the namesystem lock for an attacker who intentionally sent a huge
* ACL spec.
*
* @param aclSpec List<AclEntry> containing unvalidated input ACL spec
* @throws AclException if validation fails
*/
public ValidatedAclSpec(List<AclEntry> aclSpec) throws AclException {
if (aclSpec.size() > MAX_ENTRIES) {
throw new AclException("Invalid ACL: ACL spec has " + aclSpec.size() +
" entries, which exceeds maximum of " + MAX_ENTRIES + ".");
}
Collections.sort(aclSpec, ACL_ENTRY_COMPARATOR);
this.aclSpec = aclSpec;
}
/**
* Returns true if this contains an entry matching the given key. An ACL
* entry's key consists of scope, type and name (but not permission).
*
* @param key AclEntry search key
* @return boolean true if found
*/
public boolean containsKey(AclEntry key) {
return Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR) >= 0;
}
/**
* Returns the entry matching the given key or null if not found. An ACL
* entry's key consists of scope, type and name (but not permission).
*
* @param key AclEntry search key
* @return AclEntry entry matching the given key or null if not found
*/
public AclEntry findByKey(AclEntry key) {
int index = Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR);
if (index >= 0) {
return aclSpec.get(index);
}
return null;
}
@Override
public Iterator<AclEntry> iterator() {
return aclSpec.iterator();
}
}
}

View File

@ -39,12 +39,15 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@ -283,7 +286,7 @@ INodeFile addFile(String path, PermissionStatus permissions,
short replication, long preferredBlockSize, String clientName,
String clientMachine, DatanodeDescriptor clientNode)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException {
UnresolvedLinkException, SnapshotAccessControlException, AclException {
waitForReady();
// Always do an implicit mkdirs for parent directory tree.
@ -325,6 +328,7 @@ INodeFile addFile(String path, PermissionStatus permissions,
INodeFile unprotectedAddFile( long id,
String path,
PermissionStatus permissions,
List<AclEntry> aclEntries,
short replication,
long modificationTime,
long atime,
@ -347,6 +351,10 @@ INodeFile unprotectedAddFile( long id,
try {
if (addINode(path, newNode)) {
if (aclEntries != null) {
AclStorage.updateINodeAcl(newNode, aclEntries,
Snapshot.CURRENT_STATE_ID);
}
return newNode;
}
} catch (IOException e) {
@ -1168,7 +1176,8 @@ void unprotectedSetPermission(String src, FsPermission permissions)
if (inode == null) {
throw new FileNotFoundException("File does not exist: " + src);
}
inode.setPermission(permissions, inodesInPath.getLatestSnapshotId());
int snapshotId = inodesInPath.getLatestSnapshotId();
inode.setPermission(permissions, snapshotId);
}
void setOwner(String src, String username, String groupname)
@ -1615,6 +1624,14 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink)
*/
private HdfsFileStatus getFileInfo4DotSnapshot(String src)
throws UnresolvedLinkException {
if (getINode4DotSnapshot(src) != null) {
return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0);
}
return null;
}
private INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
Preconditions.checkArgument(
src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
"%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
@ -1626,8 +1643,7 @@ private HdfsFileStatus getFileInfo4DotSnapshot(String src)
if (node != null
&& node.isDirectory()
&& node.asDirectory() instanceof INodeDirectorySnapshottable) {
return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0);
return node;
}
return null;
}
@ -1906,7 +1922,8 @@ static String getFullPathName(INode inode) {
boolean mkdirs(String src, PermissionStatus permissions,
boolean inheritPermission, long now)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException {
UnresolvedLinkException, SnapshotAccessControlException,
AclException {
src = normalizePath(src);
String[] names = INode.getPathNames(src);
byte[][] components = INode.getPathComponents(names);
@ -1969,7 +1986,7 @@ boolean mkdirs(String src, PermissionStatus permissions,
pathbuilder.append(Path.SEPARATOR + names[i]);
unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i,
components[i], (i < lastInodeIndex) ? parentPermissions
: permissions, now);
: permissions, null, now);
if (inodes[i] == null) {
return false;
}
@ -1992,14 +2009,14 @@ boolean mkdirs(String src, PermissionStatus permissions,
}
INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
long timestamp) throws QuotaExceededException,
UnresolvedLinkException {
List<AclEntry> aclEntries, long timestamp)
throws QuotaExceededException, UnresolvedLinkException, AclException {
assert hasWriteLock();
byte[][] components = INode.getPathComponents(src);
INodesInPath iip = getExistingPathINodes(components);
INode[] inodes = iip.getINodes();
final int pos = inodes.length - 1;
unprotectedMkdir(inodeId, iip, pos, components[pos], permissions,
unprotectedMkdir(inodeId, iip, pos, components[pos], permissions, aclEntries,
timestamp);
return inodes[pos];
}
@ -2009,12 +2026,16 @@ INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
* All ancestors exist. Newly created one stored at index pos.
*/
private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
int pos, byte[] name, PermissionStatus permission, long timestamp)
throws QuotaExceededException {
int pos, byte[] name, PermissionStatus permission,
List<AclEntry> aclEntries, long timestamp)
throws QuotaExceededException, AclException {
assert hasWriteLock();
final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
timestamp);
if (addChild(inodesInPath, pos, dir, true)) {
if (aclEntries != null) {
AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
}
inodesInPath.setINode(pos, dir);
}
}
@ -2239,6 +2260,7 @@ private boolean addChild(INodesInPath iip, int pos,
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
} else {
iip.setINode(pos - 1, child.getParent());
AclStorage.copyINodeDefaultAcl(child);
addToInodeMap(child);
}
return added;
@ -2625,7 +2647,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path,
INodeSymlink addSymlink(String path, String target,
PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
throws UnresolvedLinkException, FileAlreadyExistsException,
QuotaExceededException, SnapshotAccessControlException {
QuotaExceededException, SnapshotAccessControlException, AclException {
waitForReady();
final long modTime = now();
@ -2669,7 +2691,154 @@ INodeSymlink unprotectedAddSymlink(long id, String path, String target,
target);
return addINode(path, symlink) ? symlink : null;
}
void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
writeLock();
try {
List<AclEntry> newAcl = unprotectedModifyAclEntries(src, aclSpec);
fsImage.getEditLog().logSetAcl(src, newAcl);
} finally {
writeUnlock();
}
}
private List<AclEntry> unprotectedModifyAclEntries(String src,
List<AclEntry> aclSpec) throws IOException {
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.mergeAclEntries(existingAcl,
aclSpec);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
writeLock();
try {
List<AclEntry> newAcl = unprotectedRemoveAclEntries(src, aclSpec);
fsImage.getEditLog().logSetAcl(src, newAcl);
} finally {
writeUnlock();
}
}
private List<AclEntry> unprotectedRemoveAclEntries(String src,
List<AclEntry> aclSpec) throws IOException {
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.filterAclEntriesByAclSpec(
existingAcl, aclSpec);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
void removeDefaultAcl(String src) throws IOException {
writeLock();
try {
List<AclEntry> newAcl = unprotectedRemoveDefaultAcl(src);
fsImage.getEditLog().logSetAcl(src, newAcl);
} finally {
writeUnlock();
}
}
private List<AclEntry> unprotectedRemoveDefaultAcl(String src)
throws IOException {
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.filterDefaultAclEntries(
existingAcl);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
void removeAcl(String src) throws IOException {
writeLock();
try {
unprotectedRemoveAcl(src);
fsImage.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
} finally {
writeUnlock();
}
}
private void unprotectedRemoveAcl(String src) throws IOException {
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
AclStorage.removeINodeAcl(inode, snapshotId);
}
void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
writeLock();
try {
List<AclEntry> newAcl = unprotectedSetAcl(src, aclSpec);
fsImage.getEditLog().logSetAcl(src, newAcl);
} finally {
writeUnlock();
}
}
List<AclEntry> unprotectedSetAcl(String src, List<AclEntry> aclSpec)
throws IOException {
// ACL removal is logged to edits as OP_SET_ACL with an empty list.
if (aclSpec.isEmpty()) {
unprotectedRemoveAcl(src);
return AclFeature.EMPTY_ENTRY_LIST;
}
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.replaceAclEntries(existingAcl,
aclSpec);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
AclStatus getAclStatus(String src) throws IOException {
String srcs = normalizePath(src);
readLock();
try {
// There is no real inode for the path ending in ".snapshot", so return a
// non-null, unpopulated AclStatus. This is similar to getFileInfo.
if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR) &&
getINode4DotSnapshot(srcs) != null) {
return new AclStatus.Builder().owner("").group("").build();
}
INodesInPath iip = rootDir.getLastINodeInPath(srcs, true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getPathSnapshotId();
List<AclEntry> acl = AclStorage.readINodeAcl(inode, snapshotId);
return new AclStatus.Builder()
.owner(inode.getUserName()).group(inode.getGroupName())
.stickyBit(inode.getFsPermission(snapshotId).getStickyBit())
.addEntries(acl).build();
} finally {
readUnlock();
}
}
private static INode resolveLastINode(String src, INodesInPath iip)
throws FileNotFoundException {
INode inode = iip.getLastINode();
if (inode == null)
throw new FileNotFoundException("cannot find " + src);
return inode;
}
/**
* Caches frequently used file names to reuse file name objects and
* reduce heap size.

View File

@ -33,7 +33,9 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@ -70,6 +72,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
@ -681,6 +684,7 @@ private void logRpcIds(FSEditLogOp op, boolean toLogRpcIds) {
*/
public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) {
Preconditions.checkArgument(newNode.isUnderConstruction());
PermissionStatus permissions = newNode.getPermissionStatus();
AddOp op = AddOp.getInstance(cache.get())
.setInodeId(newNode.getId())
.setPath(path)
@ -689,9 +693,14 @@ public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) {
.setAccessTime(newNode.getAccessTime())
.setBlockSize(newNode.getPreferredBlockSize())
.setBlocks(newNode.getBlocks())
.setPermissionStatus(newNode.getPermissionStatus())
.setPermissionStatus(permissions)
.setClientName(newNode.getFileUnderConstructionFeature().getClientName())
.setClientMachine(newNode.getFileUnderConstructionFeature().getClientMachine());
AclFeature f = newNode.getAclFeature();
if (f != null) {
op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
}
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
@ -736,11 +745,17 @@ public void logUpdateBlocks(String path, INodeFile file, boolean toLogRpcIds) {
* Add create directory record to edit log
*/
public void logMkDir(String path, INode newNode) {
PermissionStatus permissions = newNode.getPermissionStatus();
MkdirOp op = MkdirOp.getInstance(cache.get())
.setInodeId(newNode.getId())
.setPath(path)
.setTimestamp(newNode.getModificationTime())
.setPermissionStatus(newNode.getPermissionStatus());
.setPermissionStatus(permissions);
AclFeature f = newNode.getAclFeature();
if (f != null) {
op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
}
logEdit(op);
}
@ -1014,6 +1029,13 @@ void logRemoveCachePool(String poolName, boolean toLogRpcIds) {
logEdit(op);
}
void logSetAcl(String src, List<AclEntry> entries) {
SetAclOp op = SetAclOp.getInstance();
op.src = src;
op.aclEntries = entries;
logEdit(op);
}
/**
* Get all the journals this edit log is currently operating on.
*/

View File

@ -67,6 +67,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp;
@ -323,9 +324,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
lastInodeId);
newFile = fsDir.unprotectedAddFile(inodeId,
path, addCloseOp.permissions, replication,
addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true,
addCloseOp.clientName, addCloseOp.clientMachine);
path, addCloseOp.permissions, addCloseOp.aclEntries,
replication, addCloseOp.mtime, addCloseOp.atime,
addCloseOp.blockSize, true, addCloseOp.clientName,
addCloseOp.clientMachine);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
// add the op into retry cache if necessary
@ -485,7 +487,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
lastInodeId);
fsDir.unprotectedMkdir(inodeId,
renameReservedPathsOnUpgrade(mkdirOp.path, logVersion),
mkdirOp.permissions, mkdirOp.timestamp);
mkdirOp.permissions, mkdirOp.aclEntries, mkdirOp.timestamp);
break;
}
case OP_SET_GENSTAMP_V1: {
@ -744,6 +746,11 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
}
break;
}
case OP_SET_ACL: {
SetAclOp setAclOp = (SetAclOp) op;
fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries);
break;
}
default:
throw new IOException("Invalid operation read " + op.opCode);
}

View File

@ -44,6 +44,7 @@
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_ACL;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V1;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V2;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_NS_QUOTA;
@ -76,6 +77,10 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -87,6 +92,8 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
@ -109,6 +116,8 @@
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
/**
* Helper classes for reading the ops from an InputStream.
@ -170,6 +179,7 @@ public OpInstanceCache() {
inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp());
inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp());
inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp());
inst.put(OP_SET_ACL, new SetAclOp());
}
public FSEditLogOp get(FSEditLogOpCodes opcode) {
@ -177,6 +187,16 @@ public FSEditLogOp get(FSEditLogOpCodes opcode) {
}
}
private static ImmutableMap<String, FsAction> fsActionMap() {
ImmutableMap.Builder<String, FsAction> b = ImmutableMap.builder();
for (FsAction v : FsAction.values())
b.put(v.SYMBOL, v);
return b.build();
}
private static final ImmutableMap<String, FsAction> FSACTION_SYMBOL_MAP
= fsActionMap();
/**
* Constructor for an EditLog Op. EditLog ops cannot be constructed
* directly, but only through Reader#readOp.
@ -278,7 +298,76 @@ private static void appendRpcIdsToXml(ContentHandler contentHandler,
XMLUtils.addSaxString(contentHandler, "RPC_CALLID",
Integer.valueOf(callId).toString());
}
private static final class AclEditLogUtil {
private static final int ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET = 6;
private static final int ACL_EDITLOG_ENTRY_TYPE_OFFSET = 3;
private static final int ACL_EDITLOG_ENTRY_SCOPE_OFFSET = 5;
private static final int ACL_EDITLOG_PERM_MASK = 7;
private static final int ACL_EDITLOG_ENTRY_TYPE_MASK = 3;
private static final int ACL_EDITLOG_ENTRY_SCOPE_MASK = 1;
private static final FsAction[] FSACTION_VALUES = FsAction.values();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
.values();
private static List<AclEntry> read(DataInputStream in, int logVersion)
throws IOException {
if (!LayoutVersion.supports(Feature.EXTENDED_ACL, logVersion)) {
return null;
}
int size = in.readInt();
if (size == 0) {
return null;
}
List<AclEntry> aclEntries = Lists.newArrayListWithCapacity(size);
for (int i = 0; i < size; ++i) {
int v = in.read();
int p = v & ACL_EDITLOG_PERM_MASK;
int t = (v >> ACL_EDITLOG_ENTRY_TYPE_OFFSET)
& ACL_EDITLOG_ENTRY_TYPE_MASK;
int s = (v >> ACL_EDITLOG_ENTRY_SCOPE_OFFSET)
& ACL_EDITLOG_ENTRY_SCOPE_MASK;
boolean hasName = ((v >> ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET) & 1) == 1;
String name = hasName ? FSImageSerialization.readString(in) : null;
aclEntries.add(new AclEntry.Builder().setName(name)
.setPermission(FSACTION_VALUES[p])
.setScope(ACL_ENTRY_SCOPE_VALUES[s])
.setType(ACL_ENTRY_TYPE_VALUES[t]).build());
}
return aclEntries;
}
private static void write(List<AclEntry> aclEntries, DataOutputStream out)
throws IOException {
if (aclEntries == null) {
out.writeInt(0);
return;
}
out.writeInt(aclEntries.size());
for (AclEntry e : aclEntries) {
boolean hasName = e.getName() != null;
int v = (e.getScope().ordinal() << ACL_EDITLOG_ENTRY_SCOPE_OFFSET)
| (e.getType().ordinal() << ACL_EDITLOG_ENTRY_TYPE_OFFSET)
| e.getPermission().ordinal();
if (hasName) {
v |= 1 << ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET;
}
out.write(v);
if (hasName) {
FSImageSerialization.writeString(e.getName(), out);
}
}
}
}
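A worked example of this packing (editor's addition; the sample entry is hypothetical, and the constants referenced are the ones defined above): default:user:diana:rw- has scope ordinal 1, type ordinal 0 and permission ordinal 6, and carries a name, so the flag byte is 102, followed by the name string.
int v = (AclEntryScope.DEFAULT.ordinal() << ACL_EDITLOG_ENTRY_SCOPE_OFFSET)  // 1 << 5
      | (AclEntryType.USER.ordinal() << ACL_EDITLOG_ENTRY_TYPE_OFFSET)       // 0 << 3
      | FsAction.READ_WRITE.ordinal()                                        // 6
      | (1 << ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET);                            // has-name bit
assert v == 102;  // read() reverses this with the masks above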
@SuppressWarnings("unchecked")
static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp {
int length;
@ -290,6 +379,7 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin
long blockSize;
Block[] blocks;
PermissionStatus permissions;
List<AclEntry> aclEntries;
String clientName;
String clientMachine;
@ -352,6 +442,11 @@ <T extends AddCloseOp> T setPermissionStatus(PermissionStatus permissions) {
return (T)this;
}
<T extends AddCloseOp> T setAclEntries(List<AclEntry> aclEntries) {
this.aclEntries = aclEntries;
return (T)this;
}
<T extends AddCloseOp> T setClientName(String clientName) {
this.clientName = clientName;
return (T)this;
@ -374,6 +469,7 @@ public void writeFields(DataOutputStream out) throws IOException {
permissions.write(out);
if (this.opCode == OP_ADD) {
AclEditLogUtil.write(aclEntries, out);
FSImageSerialization.writeString(clientName,out);
FSImageSerialization.writeString(clientMachine,out);
// write clientId and callId
@ -432,6 +528,7 @@ void readFields(DataInputStream in, int logVersion)
// clientname, clientMachine and block locations of last block.
if (this.opCode == OP_ADD) {
aclEntries = AclEditLogUtil.read(in, logVersion);
this.clientName = FSImageSerialization.readString(in);
this.clientMachine = FSImageSerialization.readString(in);
// read clientId and callId
@ -483,6 +580,8 @@ public String stringifyMembers() {
builder.append(Arrays.toString(blocks));
builder.append(", permissions=");
builder.append(permissions);
builder.append(", aclEntries=");
builder.append(aclEntries);
builder.append(", clientName=");
builder.append(clientName);
builder.append(", clientMachine=");
@ -520,6 +619,9 @@ protected void toXml(ContentHandler contentHandler) throws SAXException {
}
FSEditLogOp.permissionStatusToXml(contentHandler, permissions);
if (this.opCode == OP_ADD) {
if (aclEntries != null) {
appendAclEntriesToXml(contentHandler, aclEntries);
}
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
}
@ -545,6 +647,7 @@ void fromXml(Stanza st) throws InvalidXmlException {
this.blocks = new Block[0];
}
this.permissions = permissionStatusFromXml(st);
aclEntries = readAclEntriesFromXml(st);
readRpcIdsFromXml(st);
}
}
@ -1211,6 +1314,7 @@ static class MkdirOp extends FSEditLogOp {
String path;
long timestamp;
PermissionStatus permissions;
List<AclEntry> aclEntries;
private MkdirOp() {
super(OP_MKDIR);
@ -1240,6 +1344,11 @@ MkdirOp setPermissionStatus(PermissionStatus permissions) {
return this;
}
MkdirOp setAclEntries(List<AclEntry> aclEntries) {
this.aclEntries = aclEntries;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
@ -1248,6 +1357,7 @@ void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(timestamp, out); // mtime
FSImageSerialization.writeLong(timestamp, out); // atime, unused at this
permissions.write(out);
AclEditLogUtil.write(aclEntries, out);
}
@Override
@ -1285,6 +1395,7 @@ void readFields(DataInputStream in, int logVersion) throws IOException {
}
this.permissions = PermissionStatus.read(in);
aclEntries = AclEditLogUtil.read(in, logVersion);
}
@Override
@ -1300,6 +1411,8 @@ public String toString() {
builder.append(timestamp);
builder.append(", permissions=");
builder.append(permissions);
builder.append(", aclEntries=");
builder.append(aclEntries);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
@ -1318,6 +1431,9 @@ protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.valueOf(timestamp).toString());
FSEditLogOp.permissionStatusToXml(contentHandler, permissions);
if (aclEntries != null) {
appendAclEntriesToXml(contentHandler, aclEntries);
}
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
@ -1326,6 +1442,7 @@ protected void toXml(ContentHandler contentHandler) throws SAXException {
this.path = st.getValue("PATH");
this.timestamp = Long.valueOf(st.getValue("TIMESTAMP"));
this.permissions = permissionStatusFromXml(st);
aclEntries = readAclEntriesFromXml(st);
}
}
@ -3338,6 +3455,50 @@ public String toString() {
}
}
static class SetAclOp extends FSEditLogOp {
List<AclEntry> aclEntries = Lists.newArrayList();
String src;
private SetAclOp() {
super(OP_SET_ACL);
}
static SetAclOp getInstance() {
return new SetAclOp();
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
AclEditLogProto p = AclEditLogProto.parseDelimitedFrom((DataInputStream)in);
src = p.getSrc();
aclEntries = PBHelper.convertAclEntry(p.getEntriesList());
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
AclEditLogProto.Builder b = AclEditLogProto.newBuilder();
if (src != null)
b.setSrc(src);
b.addAllEntries(PBHelper.convertAclEntryProto(aclEntries));
b.build().writeDelimitedTo(out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
appendAclEntriesToXml(contentHandler, aclEntries);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
src = st.getValue("SRC");
aclEntries = readAclEntriesFromXml(st);
if (aclEntries == null) {
aclEntries = Lists.newArrayList();
}
}
}
static private short readShort(DataInputStream in) throws IOException {
return Short.parseShort(FSImageSerialization.readString(in));
}
@ -3747,4 +3908,45 @@ public static FsPermission fsPermissionFromXml(Stanza st)
short mode = Short.valueOf(st.getValue("MODE"));
return new FsPermission(mode);
}
private static void fsActionToXml(ContentHandler contentHandler, FsAction v)
throws SAXException {
XMLUtils.addSaxString(contentHandler, "PERM", v.SYMBOL);
}
private static FsAction fsActionFromXml(Stanza st) throws InvalidXmlException {
FsAction v = FSACTION_SYMBOL_MAP.get(st.getValue("PERM"));
if (v == null)
throw new InvalidXmlException("Invalid value for FsAction");
return v;
}
private static void appendAclEntriesToXml(ContentHandler contentHandler,
List<AclEntry> aclEntries) throws SAXException {
for (AclEntry e : aclEntries) {
contentHandler.startElement("", "", "ENTRY", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "SCOPE", e.getScope().name());
XMLUtils.addSaxString(contentHandler, "TYPE", e.getType().name());
XMLUtils.addSaxString(contentHandler, "NAME", e.getName());
fsActionToXml(contentHandler, e.getPermission());
contentHandler.endElement("", "", "ENTRY");
}
}
private static List<AclEntry> readAclEntriesFromXml(Stanza st) {
List<AclEntry> aclEntries = Lists.newArrayList();
if (!st.hasChildren("ENTRY"))
return null;
List<Stanza> stanzas = st.getChildren("ENTRY");
for (Stanza s : stanzas) {
AclEntry e = new AclEntry.Builder()
.setScope(AclEntryScope.valueOf(s.getValue("SCOPE")))
.setType(AclEntryType.valueOf(s.getValue("TYPE")))
.setName(s.getValue("NAME"))
.setPermission(fsActionFromXml(s)).build();
aclEntries.add(e);
}
return aclEntries;
}
}

View File

@ -67,6 +67,7 @@ public enum FSEditLogOpCodes {
OP_MODIFY_CACHE_POOL ((byte) 37),
OP_REMOVE_CACHE_POOL ((byte) 38),
OP_MODIFY_CACHE_DIRECTIVE ((byte) 39),
OP_SET_ACL ((byte) 40),
// Note that the current range of the valid OP code is 0~127
OP_INVALID ((byte) -1);

View File

@ -772,12 +772,10 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
modificationTime, atime, blocks, replication, blockSize);
if (underConstruction) {
file.toUnderConstruction(clientName, clientMachine, null);
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
} else {
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
}
} else if (numBlocks == -1) {
//directory
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
} else if (numBlocks == -1) {
//directory
//read quotas
final long nsQuota = in.readLong();
@ -867,8 +865,8 @@ public INodeFileAttributes loadINodeFileAttributes(DataInput in)
final short replication = namesystem.getBlockManager().adjustReplication(
in.readShort());
final long preferredBlockSize = in.readLong();
return new INodeFileAttributes.SnapshotCopy(name, permissions, modificationTime,
return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
accessTime, replication, preferredBlockSize);
}
@ -889,9 +887,9 @@ public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
final long dsQuota = in.readLong();
return nsQuota == -1L && dsQuota == -1L?
new INodeDirectoryAttributes.SnapshotCopy(name, permissions, modificationTime)
new INodeDirectoryAttributes.SnapshotCopy(name, permissions, null, modificationTime)
: new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
modificationTime, nsQuota, dsQuota);
null, modificationTime, nsQuota, dsQuota);
}
private void loadFilesUnderConstruction(DataInput in,

View File

@ -30,6 +30,10 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
@ -38,15 +42,18 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;
@InterfaceAudience.Private
@ -56,6 +63,19 @@ public final class FSImageFormatPBINode {
private final static int GROUP_STRID_OFFSET = 16;
private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);
private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
private static final int ACL_ENTRY_NAME_OFFSET = 6;
private static final int ACL_ENTRY_TYPE_OFFSET = 3;
private static final int ACL_ENTRY_SCOPE_OFFSET = 5;
private static final int ACL_ENTRY_PERM_MASK = 7;
private static final int ACL_ENTRY_TYPE_MASK = 3;
private static final int ACL_ENTRY_SCOPE_MASK = 1;
private static final FsAction[] FSACTION_VALUES = FsAction.values();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
.values();
public final static class Loader {
public static PermissionStatus loadPermission(long id,
final String[] stringTable) {
@ -66,13 +86,30 @@ public static PermissionStatus loadPermission(long id,
new FsPermission(perm));
}
public static ImmutableList<AclEntry> loadAclEntries(
AclFeatureProto proto, final String[] stringTable) {
ImmutableList.Builder<AclEntry> b = ImmutableList.builder();
for (int v : proto.getEntriesList()) {
int p = v & ACL_ENTRY_PERM_MASK;
int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK;
int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK;
int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK;
String name = stringTable[nid];
b.add(new AclEntry.Builder().setName(name)
.setPermission(FSACTION_VALUES[p])
.setScope(ACL_ENTRY_SCOPE_VALUES[s])
.setType(ACL_ENTRY_TYPE_VALUES[t]).build());
}
return b.build();
}
public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
final String[] stringTable) {
LoaderContext state) {
assert n.getType() == INodeSection.INode.Type.DIRECTORY;
INodeSection.INodeDirectory d = n.getDirectory();
final PermissionStatus permissions = loadPermission(d.getPermission(),
stringTable);
state.getStringTable());
final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
.toByteArray(), permissions, d.getModificationTime());
@ -80,6 +117,11 @@ public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
if (nsQuota >= 0 || dsQuota >= 0) {
dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
}
if (d.hasAcl()) {
dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
state.getStringTable())));
}
return dir;
}
@ -181,7 +223,7 @@ private INode loadINode(INodeSection.INode n) {
case FILE:
return loadINodeFile(n);
case DIRECTORY:
return loadINodeDirectory(n, parent.getLoaderContext().getStringTable());
return loadINodeDirectory(n, parent.getLoaderContext());
case SYMLINK:
return loadINodeSymlink(n);
default:
@ -195,6 +237,7 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
INodeSection.INodeFile f = n.getFile();
List<BlockProto> bp = f.getBlocksList();
short replication = (short) f.getReplication();
LoaderContext state = parent.getLoaderContext();
BlockInfo[] blocks = new BlockInfo[bp.size()];
for (int i = 0, e = bp.size(); i < e; ++i) {
@ -206,6 +249,12 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
final INodeFile file = new INodeFile(n.getId(),
n.getName().toByteArray(), permissions, f.getModificationTime(),
f.getAccessTime(), blocks, replication, f.getPreferredBlockSize());
if (f.hasAcl()) {
file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
state.getStringTable())));
}
// under-construction information
if (f.hasFileUC()) {
INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
@ -234,8 +283,7 @@ private INodeSymlink loadINodeSymlink(INodeSection.INode n) {
}
private void loadRootINode(INodeSection.INode p) {
INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext()
.getStringTable());
INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
final Quota.Counts q = root.getQuotaCounts();
final long nsQuota = q.get(Quota.NAMESPACE);
final long dsQuota = q.get(Quota.DISKSPACE);
@ -257,27 +305,48 @@ private static long buildPermissionStatus(INodeAttributes n,
| n.getFsPermissionShort();
}
private static AclFeatureProto.Builder buildAclEntries(AclFeature f,
final SaverContext.DeduplicationMap<String> map) {
AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
for (AclEntry e : f.getEntries()) {
int v = ((map.getId(e.getName()) & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
| (e.getType().ordinal() << ACL_ENTRY_TYPE_OFFSET)
| (e.getScope().ordinal() << ACL_ENTRY_SCOPE_OFFSET)
| (e.getPermission().ordinal());
b.addEntries(v);
}
return b;
}
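A worked example of the fsimage encoding (editor's addition; assuming the hypothetical name "diana" deduplicates to string-table id 2): each entry int embeds the name id above bit 6 and uses the same low-order layout as the edit log encoding.
int v = ((2 & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)     // name id 2 << 6
      | (AclEntryType.USER.ordinal() << ACL_ENTRY_TYPE_OFFSET)   // 0 << 3
      | (AclEntryScope.DEFAULT.ordinal() << ACL_ENTRY_SCOPE_OFFSET)  // 1 << 5
      | FsAction.READ_WRITE.ordinal();                           // 6
assert v == 166;  // decoded back by Loader.loadAclEntries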
public static INodeSection.INodeFile.Builder buildINodeFile(
INodeFileAttributes file,
final SaverContext.DeduplicationMap<String> stringMap) {
INodeFileAttributes file, final SaverContext state) {
INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
.setAccessTime(file.getAccessTime())
.setModificationTime(file.getModificationTime())
.setPermission(buildPermissionStatus(file, stringMap))
.setPermission(buildPermissionStatus(file, state.getStringMap()))
.setPreferredBlockSize(file.getPreferredBlockSize())
.setReplication(file.getFileReplication());
AclFeature f = file.getAclFeature();
if (f != null) {
b.setAcl(buildAclEntries(f, state.getStringMap()));
}
return b;
}
public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
INodeDirectoryAttributes dir,
final SaverContext.DeduplicationMap<String> stringMap) {
INodeDirectoryAttributes dir, final SaverContext state) {
Quota.Counts quota = dir.getQuotaCounts();
INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
.newBuilder().setModificationTime(dir.getModificationTime())
.setNsQuota(quota.get(Quota.NAMESPACE))
.setDsQuota(quota.get(Quota.DISKSPACE))
.setPermission(buildPermissionStatus(dir, stringMap));
.setPermission(buildPermissionStatus(dir, state.getStringMap()));
AclFeature f = dir.getAclFeature();
if (f != null) {
b.setAcl(buildAclEntries(f, state.getStringMap()));
}
return b;
}
@ -378,7 +447,7 @@ private void save(OutputStream out, INode n) throws IOException {
private void save(OutputStream out, INodeDirectory n) throws IOException {
INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
parent.getSaverContext().getStringMap());
parent.getSaverContext());
INodeSection.INode r = buildINodeCommon(n)
.setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
r.writeDelimitedTo(out);
@ -386,7 +455,7 @@ private void save(OutputStream out, INodeDirectory n) throws IOException {
private void save(OutputStream out, INodeFile n) throws IOException {
INodeSection.INodeFile.Builder b = buildINodeFile(n,
parent.getSaverContext().getStringMap());
parent.getSaverContext());
for (Block block : n.getBlocks()) {
b.addBlocks(PBHelper.convert(block));
@ -407,12 +476,14 @@ private void save(OutputStream out, INodeFile n) throws IOException {
}
private void save(OutputStream out, INodeSymlink n) throws IOException {
SaverContext state = parent.getSaverContext();
INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
.newBuilder()
.setPermission(buildPermissionStatus(n, parent.getSaverContext().getStringMap()))
.setPermission(buildPermissionStatus(n, state.getStringMap()))
.setTarget(ByteString.copyFrom(n.getSymlink()))
.setModificationTime(n.getModificationTime())
.setAccessTime(n.getAccessTime());
INodeSection.INode r = buildINodeCommon(n)
.setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
r.writeDelimitedTo(out);

View File

@ -116,9 +116,11 @@ Set<Entry<E, Integer>> entrySet() {
return map.entrySet();
}
}
private final DeduplicationMap<String> stringMap = DeduplicationMap.newMap();
private final ArrayList<INodeReference> refList = Lists.newArrayList();
private final DeduplicationMap<String> stringMap = DeduplicationMap
.newMap();
public DeduplicationMap<String> getStringMap() {
return stringMap;
}
@ -547,6 +549,7 @@ private void saveStringTableSection(FileSummary.Builder summary)
public enum SectionName {
NS_INFO("NS_INFO"),
STRING_TABLE("STRING_TABLE"),
EXTENDED_ACL("EXTENDED_ACL"),
INODE("INODE"),
INODE_REFERENCE("INODE_REFERENCE"),
SNAPSHOT("SNAPSHOT"),

View File

@ -143,6 +143,8 @@
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
@ -490,7 +492,9 @@ private void logAuditEvent(boolean succeeded,
private INodeId inodeId;
private final RetryCache retryCache;
private final AclConfigFlag aclConfigFlag;
/**
* Set the last allocated inode id when fsimage or editlog is loaded.
*/
@ -757,6 +761,7 @@ public static FSNamesystem loadFromDisk(Configuration conf)
this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
auditLoggers.get(0) instanceof DefaultAuditLogger;
this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
this.aclConfigFlag = new AclConfigFlag(conf);
} catch(IOException e) {
LOG.error(getClass().getSimpleName() + " initialization failed.", e);
close();
@ -7365,6 +7370,127 @@ public BatchedListEntries<CachePoolEntry> listCachePools(String prevKey)
return results;
}
void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.modifyAclEntries(src, aclSpec);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "modifyAclEntries", src, null, resultingStat);
}
void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.removeAclEntries(src, aclSpec);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "removeAclEntries", src, null, resultingStat);
}
void removeDefaultAcl(String src) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.removeDefaultAcl(src);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "removeDefaultAcl", src, null, resultingStat);
}
void removeAcl(String src) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot remove ACL on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.removeAcl(src);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "removeAcl", src, null, resultingStat);
}
void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot set ACL on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.setAcl(src, aclSpec);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "setAcl", src, null, resultingStat);
}
AclStatus getAclStatus(String src) throws IOException {
aclConfigFlag.checkForApiCall();
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ);
readLock();
try {
checkOperation(OperationCategory.READ);
if (isPermissionEnabled) {
checkPermission(pc, src, false, null, null, null, null);
}
return dir.getAclStatus(src);
} finally {
readUnlock();
}
}
/**
* Default AuditLogger implementation; used when no access logger is
* defined in the config file. It can also be explicitly listed in the

View File

@ -20,16 +20,21 @@
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.Stack;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
/**
* Class that helps in checking file system permission.
@ -42,12 +47,27 @@ class FSPermissionChecker {
static final Log LOG = LogFactory.getLog(UserGroupInformation.class);
/** @return a string for throwing {@link AccessControlException} */
private String toAccessControlString(INode inode, int snapshotId,
FsAction access, FsPermission mode) {
return toAccessControlString(inode, snapshotId, access, mode, null);
}
/** @return a string for throwing {@link AccessControlException} */
private String toAccessControlString(INode inode, int snapshotId,
FsAction access, FsPermission mode, List<AclEntry> featureEntries) {
StringBuilder sb = new StringBuilder("Permission denied: ")
.append("user=").append(user).append(", ")
.append("access=").append(access).append(", ")
.append("inode=\"").append(inode.getFullPathName()).append("\":")
.append(inode.getUserName(snapshotId)).append(':')
.append(inode.getGroupName(snapshotId)).append(':')
.append(inode.isDirectory() ? 'd' : '-')
.append(mode);
if (featureEntries != null) {
sb.append(':').append(StringUtils.join(",", featureEntries));
}
return sb.toString();
}
private final UserGroupInformation ugi;
private final String user;
@ -219,7 +239,20 @@ private void check(INode inode, int snapshotId, FsAction access
return;
}
FsPermission mode = inode.getFsPermission(snapshotId);
AclFeature aclFeature = inode.getAclFeature(snapshotId);
if (aclFeature != null) {
List<AclEntry> featureEntries = aclFeature.getEntries();
// It's possible that the inode has a default ACL but no access ACL.
if (featureEntries.get(0).getScope() == AclEntryScope.ACCESS) {
checkAccessAcl(inode, snapshotId, access, mode, featureEntries);
return;
}
}
checkFsPermission(inode, snapshotId, access, mode);
}
private void checkFsPermission(INode inode, int snapshotId, FsAction access,
FsPermission mode) throws AccessControlException {
if (user.equals(inode.getUserName(snapshotId))) { //user class
if (mode.getUserAction().implies(access)) { return; }
}
@ -229,8 +262,88 @@ else if (groups.contains(inode.getGroupName(snapshotId))) { //group class
else { //other class
if (mode.getOtherAction().implies(access)) { return; }
}
throw new AccessControlException("Permission denied: user=" + user
+ ", access=" + access + ", inode=" + toAccessControlString(inode));
throw new AccessControlException(
toAccessControlString(inode, snapshotId, access, mode));
}
/**
* Checks requested access against an Access Control List. This method relies
* on finding the ACL data in the relevant portions of {@link FsPermission} and
* {@link AclFeature} as implemented in the logic of {@link AclStorage}. This
* method also relies on receiving the ACL entries in sorted order. This is
* assumed to be true, because the ACL modification methods in
* {@link AclTransformation} sort the resulting entries.
*
* More specifically, this method depends on these invariants in an ACL:
* - The list must be sorted.
* - Each entry in the list must be unique by scope + type + name.
* - There is exactly one each of the unnamed user/group/other entries.
* - The mask entry must not have a name.
* - The other entry must not have a name.
* - Default entries may be present, but they are ignored during enforcement.
*
* @param inode INode accessed inode
* @param snapshotId int snapshot ID
* @param access FsAction requested permission
* @param mode FsPermission mode from inode
* @param featureEntries List<AclEntry> ACL entries from AclFeature of inode
* @throws AccessControlException if the ACL denies permission
*/
private void checkAccessAcl(INode inode, int snapshotId, FsAction access,
FsPermission mode, List<AclEntry> featureEntries)
throws AccessControlException {
boolean foundMatch = false;
// Use owner entry from permission bits if user is owner.
if (user.equals(inode.getUserName(snapshotId))) {
if (mode.getUserAction().implies(access)) {
return;
}
foundMatch = true;
}
// Check named user and group entries if user was not denied by owner entry.
if (!foundMatch) {
for (AclEntry entry: featureEntries) {
if (entry.getScope() == AclEntryScope.DEFAULT) {
break;
}
AclEntryType type = entry.getType();
String name = entry.getName();
if (type == AclEntryType.USER) {
// Use named user entry with mask from permission bits applied if user
// matches name.
if (user.equals(name)) {
FsAction masked = entry.getPermission().and(mode.getGroupAction());
if (masked.implies(access)) {
return;
}
foundMatch = true;
}
} else if (type == AclEntryType.GROUP) {
// Use group entry (unnamed or named) with mask from permission bits
// applied if user is a member and entry grants access. If user is a
// member of multiple groups that have entries that grant access, then
// it doesn't matter which is chosen, so exit early after first match.
String group = name == null ? inode.getGroupName(snapshotId) : name;
if (groups.contains(group)) {
FsAction masked = entry.getPermission().and(mode.getGroupAction());
if (masked.implies(access)) {
return;
}
foundMatch = true;
}
}
}
}
// Use other entry if user was not denied by an earlier match.
if (!foundMatch && mode.getOtherAction().implies(access)) {
return;
}
throw new AccessControlException(
toAccessControlString(inode, snapshotId, access, mode, featureEntries));
}
/** Guarded by {@link FSNamesystem#readLock()} */

View File

@ -154,6 +154,31 @@ INode setPermission(FsPermission permission, int latestSnapshotId)
return nodeToUpdate;
}
abstract AclFeature getAclFeature(int snapshotId);
@Override
public final AclFeature getAclFeature() {
return getAclFeature(Snapshot.CURRENT_STATE_ID);
}
abstract void addAclFeature(AclFeature aclFeature);
final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.addAclFeature(aclFeature);
return nodeToUpdate;
}
abstract void removeAclFeature();
final INode removeAclFeature(int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.removeAclFeature();
return nodeToUpdate;
}
/**
* @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID},
* return this; otherwise return the corresponding snapshot inode.

View File

@ -48,6 +48,9 @@ public interface INodeAttributes {
/** @return the permission information as a long. */
public long getPermissionLong();
/** @return the ACL feature. */
public AclFeature getAclFeature();
/** @return the modification time. */
public long getModificationTime();
@ -58,13 +61,15 @@ public interface INodeAttributes {
public static abstract class SnapshotCopy implements INodeAttributes {
private final byte[] name;
private final long permission;
private final AclFeature aclFeature;
private final long modificationTime;
private final long accessTime;
SnapshotCopy(byte[] name, PermissionStatus permissions,
long modificationTime, long accessTime) {
AclFeature aclFeature, long modificationTime, long accessTime) {
this.name = name;
this.permission = PermissionStatusFormat.toLong(permissions);
this.aclFeature = aclFeature;
this.modificationTime = modificationTime;
this.accessTime = accessTime;
}
@ -72,6 +77,7 @@ public static abstract class SnapshotCopy implements INodeAttributes {
SnapshotCopy(INode inode) {
this.name = inode.getLocalNameBytes();
this.permission = inode.getPermissionLong();
this.aclFeature = inode.getAclFeature();
this.modificationTime = inode.getModificationTime();
this.accessTime = inode.getAccessTime();
}
@ -108,6 +114,11 @@ public long getPermissionLong() {
return permission;
}
@Override
public AclFeature getAclFeature() {
return aclFeature;
}
@Override
public final long getModificationTime() {
return modificationTime;

View File

@ -77,8 +77,11 @@ public INodeDirectory(long id, byte[] name, PermissionStatus permissions,
* @param other The INodeDirectory to be copied
* @param adopt Indicate whether or not need to set the parent field of child
* INodes to the new node
* @param featuresToCopy any number of features to copy to the new node.
* The method will do a reference copy, not a deep copy.
*/
public INodeDirectory(INodeDirectory other, boolean adopt,
Feature... featuresToCopy) {
super(other);
this.children = other.children;
if (adopt && this.children != null) {
@ -86,9 +89,7 @@ public INodeDirectory(INodeDirectory other, boolean adopt, boolean copyFeatures)
child.setParent(this);
}
}
this.features = featuresToCopy;
}
/** @return true unconditionally. */
@ -145,12 +146,7 @@ public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify)
* otherwise, return null.
*/
public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() {
return getFeature(DirectoryWithQuotaFeature.class);
}
/** Does this directory have a quota set? */
@ -185,12 +181,7 @@ public DirectoryWithSnapshotFeature addSnapshotFeature(
* otherwise, return null.
*/
public final DirectoryWithSnapshotFeature getDirectoryWithSnapshotFeature() {
return getFeature(DirectoryWithSnapshotFeature.class);
}
/** Does this directory have the snapshot feature? */
@ -231,7 +222,8 @@ public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
Preconditions.checkState(getClass() != INodeDirectory.class,
"the class is already INodeDirectory, this=%s", this);
return replaceSelf(new INodeDirectory(this, true, this.getFeatures()),
inodeMap);
}
/** Replace itself with the given directory. */

View File

@ -35,8 +35,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
implements INodeDirectoryAttributes {
public SnapshotCopy(byte[] name, PermissionStatus permissions,
AclFeature aclFeature, long modificationTime) {
super(name, permissions, aclFeature, modificationTime, 0L);
}
public SnapshotCopy(INodeDirectory dir) {
@ -62,8 +62,9 @@ public static class CopyWithQuota extends INodeDirectoryAttributes.SnapshotCopy
public CopyWithQuota(byte[] name, PermissionStatus permissions,
AclFeature aclFeature, long modificationTime, long nsQuota,
long dsQuota) {
super(name, permissions, aclFeature, modificationTime);
this.nsQuota = nsQuota;
this.dsQuota = dsQuota;
}

View File

@ -151,12 +151,7 @@ public final INodeFile asFile() {
* otherwise, return null.
*/
public final FileUnderConstructionFeature getFileUnderConstructionFeature() {
return getFeature(FileUnderConstructionFeature.class);
}
/** Is this file under construction? */
@ -265,12 +260,7 @@ public FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
* otherwise, return null.
*/
public final FileWithSnapshotFeature getFileWithSnapshotFeature() {
return getFeature(FileWithSnapshotFeature.class);
}
/** Does this file have the snapshot feature? */

View File

@ -41,9 +41,9 @@ public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
private final long header;
public SnapshotCopy(byte[] name, PermissionStatus permissions,
AclFeature aclFeature, long modificationTime, long accessTime,
short replication, long preferredBlockSize) {
super(name, permissions, aclFeature, modificationTime, accessTime);
final long h = HeaderFormat.combineReplication(0L, replication);
header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);

View File

@ -213,6 +213,22 @@ final void setGroup(String group) {
public final FsPermission getFsPermission(int snapshotId) {
return referred.getFsPermission(snapshotId);
}
@Override
final AclFeature getAclFeature(int snapshotId) {
return referred.getAclFeature(snapshotId);
}
@Override
final void addAclFeature(AclFeature aclFeature) {
referred.addAclFeature(aclFeature);
}
@Override
final void removeAclFeature() {
referred.removeAclFeature();
}
@Override
public final short getFsPermissionShort() {
return referred.getFsPermissionShort();

View File

@ -21,6 +21,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
@ -219,6 +220,15 @@ public long getPermissionLong() {
return permission;
}
@Override
final AclFeature getAclFeature(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getAclFeature();
}
return getFeature(AclFeature.class);
}
@Override
final long getModificationTime(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
@ -305,4 +315,33 @@ protected void removeFeature(Feature f) {
+ f.getClass().getSimpleName() + " not found.");
features = arr;
}
protected <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
for (Feature f : features) {
if (f.getClass() == clazz) {
@SuppressWarnings("unchecked")
T ret = (T) f;
return ret;
}
}
return null;
}
public void removeAclFeature() {
AclFeature f = getAclFeature();
Preconditions.checkNotNull(f);
removeFeature(f);
}
public void addAclFeature(AclFeature f) {
AclFeature f1 = getAclFeature();
if (f1 != null)
throw new IllegalStateException("Duplicated ACLFeature");
addFeature(f);
}
public final Feature[] getFeatures() {
return features;
}
}

View File

@ -49,6 +49,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.HAServiceStatus;
@ -1287,5 +1289,37 @@ public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
throws IOException {
return namesystem.listCachePools(prevKey != null ? prevKey : "");
}
@Override
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
namesystem.modifyAclEntries(src, aclSpec);
}
@Override
public void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
namesystem.removeAclEntries(src, aclSpec);
}
@Override
public void removeDefaultAcl(String src) throws IOException {
namesystem.removeDefaultAcl(src);
}
@Override
public void removeAcl(String src) throws IOException {
namesystem.removeAcl(src);
}
@Override
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
namesystem.setAcl(src, aclSpec);
}
@Override
public AclStatus getAclStatus(String src) throws IOException {
return namesystem.getAclStatus(src);
}
}

View File

@ -0,0 +1,93 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
/**
* Groups a list of ACL entries into separate lists for access entries vs.
* default entries.
*/
@InterfaceAudience.Private
final class ScopedAclEntries {
private static final int PIVOT_NOT_FOUND = -1;
private final List<AclEntry> accessEntries;
private final List<AclEntry> defaultEntries;
/**
* Creates a new ScopedAclEntries from the given list. It is assumed that the
* list is already sorted such that all access entries precede all default
* entries.
*
* @param aclEntries List<AclEntry> to separate
*/
public ScopedAclEntries(List<AclEntry> aclEntries) {
int pivot = calculatePivotOnDefaultEntries(aclEntries);
if (pivot != PIVOT_NOT_FOUND) {
accessEntries = pivot != 0 ? aclEntries.subList(0, pivot) :
Collections.<AclEntry>emptyList();
defaultEntries = aclEntries.subList(pivot, aclEntries.size());
} else {
accessEntries = aclEntries;
defaultEntries = Collections.emptyList();
}
}
/**
* Returns access entries.
*
* @return List<AclEntry> containing just access entries, or an empty list if
* there are no access entries
*/
public List<AclEntry> getAccessEntries() {
return accessEntries;
}
/**
* Returns default entries.
*
* @return List<AclEntry> containing just default entries, or an empty list if
* there are no default entries
*/
public List<AclEntry> getDefaultEntries() {
return defaultEntries;
}
/**
* Returns the pivot point in the list between the access entries and the
* default entries. This is the index of the first element in the list that is
* a default entry.
*
* @param aclBuilder List<AclEntry> containing entries to scan
* @return int pivot point, or -1 if list contains no default entries
*/
private static int calculatePivotOnDefaultEntries(List<AclEntry> aclBuilder) {
for (int i = 0; i < aclBuilder.size(); ++i) {
if (aclBuilder.get(i).getScope() == AclEntryScope.DEFAULT) {
return i;
}
}
return PIVOT_NOT_FOUND;
}
}

View File

@ -36,8 +36,11 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
@ -154,7 +157,7 @@ private void loadSnapshots(InputStream in, int size) throws IOException {
SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
.parseDelimitedFrom(in);
INodeDirectory root = loadINodeDirectory(pbs.getRoot(),
parent.getLoaderContext());
int sid = pbs.getSnapshotId();
INodeDirectorySnapshottable parent = (INodeDirectorySnapshottable) fsDir
.getInode(root.getId()).asDirectory();
@ -197,6 +200,7 @@ public void loadSnapshotDiffSection(InputStream in) throws IOException {
private void loadFileDiffList(InputStream in, INodeFile file, int size)
throws IOException {
final FileDiffList diffs = new FileDiffList();
final LoaderContext state = parent.getLoaderContext();
for (int i = 0; i < size; i++) {
SnapshotDiffSection.FileDiff pbf = SnapshotDiffSection.FileDiff
.parseDelimitedFrom(in);
@ -204,10 +208,16 @@ private void loadFileDiffList(InputStream in, INodeFile file, int size)
if (pbf.hasSnapshotCopy()) {
INodeSection.INodeFile fileInPb = pbf.getSnapshotCopy();
PermissionStatus permission = loadPermission(
fileInPb.getPermission(), state.getStringTable());
AclFeature acl = null;
if (fileInPb.hasAcl()) {
acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
fileInPb.getAcl(), state.getStringTable()));
}
copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
.toByteArray(), permission, acl, fileInPb.getModificationTime(),
fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
fileInPb.getPreferredBlockSize());
}
@ -277,6 +287,8 @@ private void loadDirectoryDiffList(InputStream in, INodeDirectory dir,
dir.addSnapshotFeature(null);
}
DirectoryDiffList diffs = dir.getDiffs();
final LoaderContext state = parent.getLoaderContext();
for (int i = 0; i < size; i++) {
// load a directory diff
SnapshotDiffSection.DirectoryDiff diffInPb = SnapshotDiffSection.
@ -292,15 +304,22 @@ private void loadDirectoryDiffList(InputStream in, INodeDirectory dir,
INodeSection.INodeDirectory dirCopyInPb = diffInPb.getSnapshotCopy();
final byte[] name = diffInPb.getName().toByteArray();
PermissionStatus permission = loadPermission(
dirCopyInPb.getPermission(), state.getStringTable());
AclFeature acl = null;
if (dirCopyInPb.hasAcl()) {
acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
dirCopyInPb.getAcl(), state.getStringTable()));
}
long modTime = dirCopyInPb.getModificationTime();
boolean noQuota = dirCopyInPb.getNsQuota() == -1
&& dirCopyInPb.getDsQuota() == -1;
copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name,
permission, acl, modTime)
: new INodeDirectoryAttributes.CopyWithQuota(name, permission,
acl, modTime, dirCopyInPb.getNsQuota(),
dirCopyInPb.getDsQuota());
}
// load created list
List<INode> clist = loadCreatedList(in, dir,
@ -355,7 +374,7 @@ public void serializeSnapshotSection(OutputStream out) throws IOException {
SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot
.newBuilder().setSnapshotId(s.getId());
INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot,
parent.getSaverContext());
INodeSection.INode r = INodeSection.INode.newBuilder()
.setId(sroot.getId())
.setType(INodeSection.INode.Type.DIRECTORY)
@ -443,7 +462,7 @@ private void serializeFileDiffList(INodeFile file, OutputStream out)
INodeFileAttributes copy = diff.snapshotINode;
if (copy != null) {
fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
.setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
}
fb.build().writeDelimitedTo(out);
}
@ -480,7 +499,7 @@ private void serializeDirDiffList(INodeDirectory dir,
if (!diff.isSnapshotRoot() && copy != null) {
db.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
.setSnapshotCopy(
buildINodeDirectory(copy, parent.getSaverContext()));
}
// process created list and deleted list
List<INode> created = diff.getChildrenDiff()

View File

@ -184,7 +184,7 @@ ReadOnlyList<Snapshot> getSnapshotsByNames() {
private int snapshotQuota = SNAPSHOT_LIMIT;
public INodeDirectorySnapshottable(INodeDirectory dir) {
super(dir, true, dir.getFeatures());
// add snapshot feature if the original directory does not have it
if (!isWithSnapshot()) {
addSnapshotFeature(null);

View File

@ -21,6 +21,7 @@
import java.io.DataOutput;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Date;
@ -28,12 +29,16 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
/** Snapshot of a sub-tree in the namesystem. */
@InterfaceAudience.Private
public class Snapshot implements Comparable<byte[]> {
@ -139,7 +144,10 @@ static Snapshot read(DataInput in, FSImageFormat.Loader loader)
/** The root directory of the snapshot. */
static public class Root extends INodeDirectory {
Root(INodeDirectory other) {
// Always preserve ACL.
super(other, false, Lists.newArrayList(
Iterables.filter(Arrays.asList(other.getFeatures()), AclFeature.class))
.toArray(new Feature[0]));
}
@Override

View File

@ -53,6 +53,7 @@
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@ -71,6 +72,7 @@
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
@ -320,12 +322,14 @@ public Response putRoot(
@QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT)
final CreateParentParam createParent,
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
final TokenArgumentParam delegationTokenArgument,
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
final AclPermissionParam aclPermission
) throws IOException, InterruptedException {
return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
owner, group, permission, overwrite, bufferSize, replication,
blockSize, modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission);
}
/** Handle HTTP PUT request. */
@ -369,12 +373,14 @@ public Response put(
@QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT)
final CreateParentParam createParent,
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
final TokenArgumentParam delegationTokenArgument,
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
final AclPermissionParam aclPermission
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, destination, owner,
group, permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, delegationTokenArgument, aclPermission);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@ -385,7 +391,7 @@ public Response run() throws IOException, URISyntaxException {
path.getAbsolutePath(), op, destination, owner, group,
permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission);
} finally {
REMOTE_ADDRESS.set(null);
}
@ -412,7 +418,8 @@ private Response put(
final AccessTimeParam accessTime,
final RenameOptionSetParam renameOptions,
final CreateParentParam createParent,
final TokenArgumentParam delegationTokenArgument,
final AclPermissionParam aclPermission
) throws IOException, URISyntaxException {
final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
@ -492,6 +499,26 @@ private Response put(
np.cancelDelegationToken(token);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case MODIFYACLENTRIES: {
np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEACLENTRIES: {
np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEDEFAULTACL: {
np.removeDefaultAcl(fullpath);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEACL: {
np.removeAcl(fullpath);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETACL: {
np.setAcl(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
@ -732,6 +759,15 @@ private Response get(
WebHdfsFileSystem.getHomeDirectoryString(ugi));
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETACLSTATUS: {
AclStatus status = np.getAclStatus(fullpath);
if (status == null) {
throw new FileNotFoundException("File does not exist: " + fullpath);
}
final String js = JsonUtil.toJsonString(status);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.*;
@ -616,4 +618,44 @@ public static MD5MD5CRC32FileChecksum toMD5MD5CRC32FileChecksum(
return checksum;
}
/** Convert an AclStatus object to a JSON string. */
public static String toJsonString(final AclStatus status) {
if (status == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("owner", status.getOwner());
m.put("group", status.getGroup());
m.put("stickyBit", status.isStickyBit());
m.put("entries", status.getEntries());
final Map<String, Map<String, Object>> finalMap =
new TreeMap<String, Map<String, Object>>();
finalMap.put(AclStatus.class.getSimpleName(), m);
return JSON.toString(finalMap);
}
/** Convert a JSON map to an AclStatus object. */
public static AclStatus toAclStatus(final Map<?, ?> json) {
if (json == null) {
return null;
}
final Map<?, ?> m = (Map<?, ?>) json.get(AclStatus.class.getSimpleName());
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
aclStatusBuilder.owner((String) m.get("owner"));
aclStatusBuilder.group((String) m.get("group"));
aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
final Object[] entries = (Object[]) m.get("entries");
List<AclEntry> aclEntryList = new ArrayList<AclEntry>();
for (int i = 0; i < entries.length; i++) {
AclEntry aclEntry = AclEntry.parseAclEntry((String) entries[i], true);
aclEntryList.add(aclEntry);
}
aclStatusBuilder.addEntries(aclEntryList);
return aclStatusBuilder.build();
}
}

View File

@ -49,6 +49,8 @@
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
@ -57,6 +59,7 @@
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
@ -697,6 +700,17 @@ private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
}
@Override
public AclStatus getAclStatus(Path f) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETACLSTATUS;
final Map<?, ?> json = run(op, f);
AclStatus status = JsonUtil.toAclStatus(json);
if (status == null) {
throw new FileNotFoundException("File does not exist: " + f);
}
return status;
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
statistics.incrementWriteOps(1);
@ -757,6 +771,44 @@ public void setPermission(final Path p, final FsPermission permission
run(op, p, new PermissionParam(permission));
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.MODIFYACLENTRIES;
run(op, path, new AclPermissionParam(aclSpec));
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.REMOVEACLENTRIES;
run(op, path, new AclPermissionParam(aclSpec));
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.REMOVEDEFAULTACL;
run(op, path);
}
@Override
public void removeAcl(Path path) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.REMOVEACL;
run(op, path);
}
@Override
public void setAcl(final Path p, final List<AclEntry> aclSpec)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.SETACL;
run(op, p, new AclPermissionParam(aclSpec));
}
@Override
public boolean setReplication(final Path p, final short replication
) throws IOException {

View File

@ -0,0 +1,71 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.commons.lang.StringUtils;
/** AclPermission parameter. */
public class AclPermissionParam extends StringParam {
/** Parameter name. */
public static final String NAME = "aclspec";
/** Default parameter value. */
public static final String DEFAULT = "";
private static Domain DOMAIN = new Domain(NAME,
Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
/**
* Constructor.
*
* @param str a string representation of the parameter value.
*/
public AclPermissionParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
}
public AclPermissionParam(List<AclEntry> acl) {
super(DOMAIN, parseAclSpec(acl).equals(DEFAULT) ? null : parseAclSpec(acl));
}
@Override
public String getName() {
return NAME;
}
public List<AclEntry> getAclPermission(boolean includePermission) {
final String v = getValue();
return (v != null ? AclEntry.parseAclSpec(v, includePermission) : AclEntry
.parseAclSpec(DEFAULT, includePermission));
}
/**
* Convert a list of AclEntry into an aclspec string.
*
* @param aclEntry List<AclEntry> to convert
* @return String aclspec in comma-separated form
*/
private static String parseAclSpec(List<AclEntry> aclEntry) {
return StringUtils.join(aclEntry, ",");
}
}

View File

@ -35,6 +35,7 @@ public static enum Op implements HttpOpParam.Op {
/** GET_BLOCK_LOCATIONS is a private unstable op. */
GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);

View File

@ -37,6 +37,12 @@ public static enum Op implements HttpOpParam.Op {
RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
MODIFYACLENTRIES(false, HttpURLConnection.HTTP_OK),
REMOVEACLENTRIES(false, HttpURLConnection.HTTP_OK),
REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK),
REMOVEACL(false, HttpURLConnection.HTTP_OK),
SETACL(false, HttpURLConnection.HTTP_OK),
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final boolean doOutputAndRedirect;

View File

@ -30,6 +30,7 @@ package hadoop.hdfs;
import "Security.proto";
import "hdfs.proto";
import "acl.proto";
/**
* The ClientNamenodeProtocol Service defines the interface between a client
@ -719,4 +720,16 @@ service ClientNamenodeProtocol {
returns(GetSnapshotDiffReportResponseProto);
rpc isFileClosed(IsFileClosedRequestProto)
returns(IsFileClosedResponseProto);
rpc modifyAclEntries(ModifyAclEntriesRequestProto)
returns(ModifyAclEntriesResponseProto);
rpc removeAclEntries(RemoveAclEntriesRequestProto)
returns(RemoveAclEntriesResponseProto);
rpc removeDefaultAcl(RemoveDefaultAclRequestProto)
returns(RemoveDefaultAclResponseProto);
rpc removeAcl(RemoveAclRequestProto)
returns(RemoveAclResponseProto);
rpc setAcl(SetAclRequestProto)
returns(SetAclResponseProto);
rpc getAclStatus(GetAclStatusRequestProto)
returns(GetAclStatusResponseProto);
}

View File

@ -0,0 +1,112 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "AclProtos";
option java_generate_equals_and_hash = true;
package hadoop.hdfs;
import "hdfs.proto";
message AclEntryProto {
enum AclEntryScopeProto {
ACCESS = 0x0;
DEFAULT = 0x1;
}
enum AclEntryTypeProto {
USER = 0x0;
GROUP = 0x1;
MASK = 0x2;
OTHER = 0x3;
}
enum FsActionProto {
NONE = 0x0;
EXECUTE = 0x1;
WRITE = 0x2;
WRITE_EXECUTE = 0x3;
READ = 0x4;
READ_EXECUTE = 0x5;
READ_WRITE = 0x6;
PERM_ALL = 0x7;
}
required AclEntryTypeProto type = 1;
required AclEntryScopeProto scope = 2;
required FsActionProto permissions = 3;
optional string name = 4;
}
message AclStatusProto {
required string owner = 1;
required string group = 2;
required bool sticky = 3;
repeated AclEntryProto entries = 4;
}
message AclEditLogProto {
required string src = 1;
repeated AclEntryProto entries = 2;
}
message ModifyAclEntriesRequestProto {
required string src = 1;
repeated AclEntryProto aclSpec = 2;
}
message ModifyAclEntriesResponseProto {
}
message RemoveAclRequestProto {
required string src = 1;
}
message RemoveAclResponseProto {
}
message RemoveAclEntriesRequestProto {
required string src = 1;
repeated AclEntryProto aclSpec = 2;
}
message RemoveAclEntriesResponseProto {
}
message RemoveDefaultAclRequestProto {
required string src = 1;
}
message RemoveDefaultAclResponseProto {
}
message SetAclRequestProto {
required string src = 1;
repeated AclEntryProto aclSpec = 2;
}
message SetAclResponseProto {
}
message GetAclStatusRequestProto {
required string src = 1;
}
message GetAclStatusResponseProto {
required AclStatusProto result = 1;
}

View File

@ -22,6 +22,7 @@ option java_outer_classname = "FsImageProto";
package hadoop.hdfs.fsimage;
import "hdfs.proto";
import "acl.proto";
/**
* This file defines the on-disk layout of the file system image. The
@ -88,6 +89,23 @@ message INodeSection {
optional string clientMachine = 2;
}
message AclFeatureProto {
/**
* An ACL entry is represented by a 32-bit integer in Big Endian
* format. The bits can be divided into five segments:
* [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
*
* [0:2) -- reserved for future uses.
* [2:26) -- the name of the entry, which is an ID that points to a
* string in the StringTableSection.
* [26:27) -- the scope of the entry (AclEntryScopeProto)
* [27:29) -- the type of the entry (AclEntryTypeProto)
* [29:32) -- the permission of the entry (FsActionProto)
*
*/
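/*
 * Worked example (values chosen for illustration, not part of the wire
 * format definition): under the layout above, an ACCESS-scope (0)
 * USER-type (0) entry with PERM_ALL (0x7) permissions whose name string
 * sits at id 5 in the string table is encoded as
 * (5 << 6) | (0 << 5) | (0 << 3) | 7 = 327.
 */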
repeated fixed32 entries = 2 [packed = true];
}
message INodeFile {
optional uint32 replication = 1;
optional uint64 modificationTime = 2;
@ -96,6 +114,7 @@ message INodeSection {
optional fixed64 permission = 5;
repeated BlockProto blocks = 6;
optional FileUnderConstructionFeature fileUC = 7;
optional AclFeatureProto acl = 8;
}
message INodeDirectory {
@ -105,6 +124,7 @@ message INodeSection {
// diskspace quota
optional uint64 dsQuota = 3;
optional fixed64 permission = 4;
optional AclFeatureProto acl = 5;
}
message INodeSymlink {

View File

@ -363,6 +363,16 @@
</property>
-->
<property>
<name>dfs.namenode.acls.enabled</name>
<value>false</value>
<description>
Set to true to enable support for HDFS ACLs (Access Control Lists). By
default, ACLs are disabled. When ACLs are disabled, the NameNode rejects
all RPCs related to setting or getting ACLs.
</description>
</property>
<property>
<name>dfs.block.access.token.enable</name>
<value>false</value>

View File

@ -47,6 +47,10 @@ HDFS Permissions Guide
client process, and its group is the group of the parent directory (the
BSD rule).
HDFS also provides optional support for POSIX ACLs (Access Control Lists) to
augment file permissions with finer-grained rules for specific named users or
named groups. ACLs are discussed in greater detail later in this document.
Each client process that accesses HDFS has a two-part identity composed
of the user name, and groups list. Whenever HDFS must do a permissions
check for a file or directory foo accessed by a client process,
@ -219,9 +223,173 @@ HDFS Permissions Guide
identity matches the super-user, parts of the name space may be
inaccessible to the web server.
* ACLs (Access Control Lists)
In addition to the traditional POSIX permissions model, HDFS also supports
POSIX ACLs (Access Control Lists). ACLs are useful for implementing
permission requirements that differ from the natural organizational hierarchy
of users and groups. An ACL provides a way to set different permissions for
specific named users or named groups, not only the file's owner and the
file's group.
By default, support for ACLs is disabled, and the NameNode disallows creation
of ACLs. To enable support for ACLs, set <<<dfs.namenode.acls.enabled>>> to
true in the NameNode configuration.
An ACL consists of a set of ACL entries. Each ACL entry names a specific
user or group and grants or denies read, write and execute permissions for
that specific user or group. For example:
+--
user::rw-
user:bruce:rwx #effective:r--
group::r-x #effective:r--
group:sales:rwx #effective:r--
mask::r--
other::r--
+--
ACL entries consist of a type, an optional name and a permission string.
For display purposes, ':' is used as the delimiter between each field. In
this example ACL, the file owner has read-write access, the file group has
read-execute access and others have read access. So far, this is equivalent
to setting the file's permission bits to 654.
Additionally, there are 2 extended ACL entries for the named user bruce and
the named group sales, both granted full access. The mask is a special ACL
entry that filters the permissions granted to all named user entries and
named group entries, and also the unnamed group entry. In the example, the
mask has only read permissions, and we can see that the effective permissions
of several ACL entries have been filtered accordingly.
Every ACL must have a mask. If the user doesn't supply a mask while setting
an ACL, then a mask is inserted automatically by calculating the union of
permissions on all entries that would be filtered by the mask.
Running <<<chmod>>> on a file that has an ACL actually changes the
permissions of the mask. Since the mask acts as a filter, this effectively
constrains the permissions of all extended ACL entries instead of changing
just the group entry and possibly missing other extended ACL entries.
The model also differentiates between an "access ACL", which defines the
rules to enforce during permission checks, and a "default ACL", which defines
the ACL entries that new child files or sub-directories receive automatically
during creation. For example:
+--
user::rwx
group::r-x
other::r-x
default:user::rwx
default:user:bruce:rwx #effective:r-x
default:group::r-x
default:group:sales:rwx #effective:r-x
default:mask::r-x
default:other::r-x
+--
Only directories may have a default ACL. When a new file or sub-directory is
created, it automatically copies the default ACL of its parent into its own
access ACL. A new sub-directory also copies it to its own default ACL. In
this way, the default ACL will be copied down through arbitrarily deep levels
of the file system tree as new sub-directories get created.
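As an illustration, the following shell session (the paths are placeholders)
shows a default ACL entry being copied into a new sub-directory at creation
time:

+--
hdfs dfs -mkdir /parent
hdfs dfs -setfacl -m default:user:bruce:rwx /parent
hdfs dfs -mkdir /parent/child
hdfs dfs -getfacl /parent/child
# The child's access ACL and default ACL now include an entry for bruce.
+--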
The exact permission values in the new child's access ACL are subject to
filtering by the mode parameter. Considering the default umask of 022, this
is typically 755 for new directories and 644 for new files. The mode
parameter filters the copied permission values for the unnamed user (file
owner), the mask and other. Using this particular example ACL, and creating
a new sub-directory with 755 for the mode, this mode filtering has no effect
on the final result. However, if we consider creation of a file with 644 for
the mode, then mode filtering causes the new file's ACL to receive read-write
for the unnamed user (file owner), read for the mask and read for others.
This mask also means that effective permissions for named user bruce and
named group sales are only read.
Note that the copy occurs at time of creation of the new file or
sub-directory. Subsequent changes to the parent's default ACL do not change
existing children.
The default ACL must have all minimum required ACL entries, including the
unnamed user (file owner), unnamed group (file group) and other entries. If
the user doesn't supply one of these entries while setting a default ACL,
then the entries are inserted automatically by copying the corresponding
permissions from the access ACL, or permission bits if there is no access
ACL. The default ACL must also have a mask. As described above, if the mask
is unspecified, then a mask is inserted automatically by calculating the
union of permissions on all entries that would be filtered by the mask.
When considering a file that has an ACL, the algorithm for permission checks
changes to the following (a worked example appears after the list):
* If the user name matches the owner of the file, then the owner
permissions are tested;
* Else if the user name matches the name in one of the named user entries,
then these permissions are tested, filtered by the mask permissions;
* Else if the group of the file matches any member of the groups list,
and if these permissions filtered by the mask grant access, then these
permissions are used;
* Else if there is a named group entry matching a member of the groups list,
and if these permissions filtered by the mask grant access, then these
permissions are used;
* Else if the file group or any named group entry matches a member of the
groups list, but access was not granted by any of those permissions, then
access is denied;
* Otherwise the other permissions of the file are tested.
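As a worked example, consider the first example ACL above and requests from
user bruce. For read access, the named user entry grants rwx, the mask
filters this to r--, and read is granted. For write access, the masked entry
(r--) does not grant write, and because a named user entry matched, the
check denies access rather than falling through to the other entry.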
Best practice is to rely on traditional permission bits to implement most
permission requirements, and define a smaller number of ACLs to augment the
permission bits with a few exceptional rules. A file with an ACL incurs an
additional cost in memory in the NameNode compared to a file that has only
permission bits.
* ACLs File System API
New methods (a usage sketch follows the list):
* <<<public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws
IOException;>>>
* <<<public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws
IOException;>>>
* <<<public void removeDefaultAcl(Path path) throws IOException;>>>
* <<<public void removeAcl(Path path) throws IOException;>>>
* <<<public void setAcl(Path path, List<AclEntry> aclSpec) throws
IOException;>>>
* <<<public AclStatus getAclStatus(Path path) throws IOException;>>>
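The sketch below is an illustration only: the path, the entry values and the
<<<conf>>> object are placeholders, and imports are omitted.

+--
FileSystem fs = FileSystem.get(conf);
Path path = new Path("/data/reports");

// Merge a new entry into the existing ACL: grant the named user "bruce"
// read-execute access.
fs.modifyAclEntries(path, Arrays.asList(
  new AclEntry.Builder()
    .setScope(AclEntryScope.ACCESS)
    .setType(AclEntryType.USER)
    .setName("bruce")
    .setPermission(FsAction.READ_EXECUTE)
    .build()));

// Inspect the resulting ACL.
AclStatus status = fs.getAclStatus(path);
for (AclEntry entry: status.getEntries()) {
  System.out.println(entry);
}
+--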
* ACLs Shell Commands
* <<<hdfs dfs -getfacl [-R] <path> >>>
Displays the Access Control Lists (ACLs) of files and directories. If a
directory has a default ACL, then getfacl also displays the default ACL.
* <<<hdfs dfs -setfacl [-R] [{-b|-k} {-m|-x <acl_spec>} <path>]|[--set <acl_spec> <path>] >>>
Sets Access Control Lists (ACLs) of files and directories.
* <<<hdfs dfs -ls <args> >>>
The output of <<<ls>>> will append a '+' character to the permissions
string of any file or directory that has an ACL.
See the {{{../hadoop-common/FileSystemShell.html}File System Shell}}
documentation for full coverage of these commands.
* Configuration Parameters
* <<<dfs.permissions.enabled = true>>>
If true, use the permissions system as described here. If false,
permission checking is turned off, but all other behavior is
@ -255,3 +423,9 @@ HDFS Permissions Guide
The administrators for the cluster specified as an ACL. This
controls who can access the default servlets, etc. in the HDFS.
* <<<dfs.namenode.acls.enabled = true>>>
Set to true to enable support for HDFS ACLs (Access Control Lists). By
default, ACLs are disabled. When ACLs are disabled, the NameNode rejects
all attempts to set an ACL.

View File

@ -752,6 +752,148 @@ Content-Length: 0
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setTimes
** {Modify ACL Entries}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MODIFYACLENTRIES
&aclspec=<ACLSPEC>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.modifyAclEntries
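For instance, a concrete request might look like the following (the host,
port, path, and aclspec are example values):
+---------------------------------
curl -i -X PUT "http://namenode.example.com:50070/webhdfs/v1/data/file1?op=MODIFYACLENTRIES
&aclspec=user:bruce:rw-"
+---------------------------------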
** {Remove ACL Entries}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEACLENTRIES
&aclspec=<ACLSPEC>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAclEntries
** {Remove Default ACL}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEDEFAULTACL"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeDefaultAcl
** {Remove ACL}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEACL"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAcl
** {Set ACL}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETACL
&aclspec=<ACLSPEC>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setAcl
** {Get ACL Status}
* Submit a HTTP GET request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETACLSTATUS"
+---------------------------------
The client receives a response with a {{{ACL Status JSON Schema}<<<AclStatus>>> JSON object}}:
+---------------------------------
HTTP/1.1 200 OK
Content-Type: application/json
Transfer-Encoding: chunked
{
"AclStatus": {
"entries": [
"user:carla:rw-",
"group::r-x"
],
"group": "supergroup",
"owner": "hadoop",
"stickyBit": false
}
}
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
* {Delegation Token Operations}
** {Get Delegation Token}
@ -980,6 +1122,52 @@ Transfer-Encoding: chunked
However, if additional properties are included in the responses, they are
considered as optional properties in order to maintain compatibility.
** {ACL Status JSON Schema}
+---------------------------------
{
"name" : "AclStatus",
"properties":
{
"AclStatus":
{
"type" : "object",
"properties":
{
"entries":
{
"type": "array"
"items":
{
"description": "ACL entry.",
"type": "string"
}
},
"group":
{
"description": "The group owner.",
"type" : "string",
"required" : true
},
"owner":
{
"description": "The user who is the owner.",
"type" : "string",
"required" : true
},
"stickyBit":
{
"description": "True if the sticky bit is on.",
"type" : "boolean",
"required" : true
}
}
}
}
}
+---------------------------------
** {Boolean JSON Schema}
+---------------------------------
@ -1387,6 +1575,23 @@ var tokenProperties =
* {HTTP Query Parameter Dictionary}
** {ACL Spec}
*----------------+-------------------------------------------------------------------+
|| Name | <<<aclspec>>> |
*----------------+-------------------------------------------------------------------+
|| Description | The ACL spec included in ACL modification operations. |
*----------------+-------------------------------------------------------------------+
|| Type | String |
*----------------+-------------------------------------------------------------------+
|| Default Value | \<empty\> |
*----------------+-------------------------------------------------------------------+
|| Valid Values | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. |
*----------------+-------------------------------------------------------------------+
|| Syntax | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. |
*----------------+-------------------------------------------------------------------+
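An ACL spec is a comma-separated list of ACL entries. For example, a spec
setting the base entries plus one named user might look like the following
(illustrative values):
+---------------------------------
user::rwx,user:bruce:rw-,group::r--,other::---
+---------------------------------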
** {Access Time}
*----------------+-------------------------------------------------------------------+
@ -0,0 +1,84 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestAclCLI extends CLITestHelperDFS {
private MiniDFSCluster cluster = null;
private FileSystem fs = null;
private String namenode = null;
private String username = null;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
fs = cluster.getFileSystem();
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
username = System.getProperty("user.name");
}
@After
@Override
public void tearDown() throws Exception {
super.tearDown();
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
@Override
protected String getTestFile() {
return "testAclCLI.xml";
}
@Override
protected String expandCommand(final String cmd) {
String expCmd = cmd;
expCmd = expCmd.replaceAll("NAMENODE", namenode);
expCmd = expCmd.replaceAll("USERNAME", username);
expCmd = expCmd.replaceAll("#LF#",
System.getProperty("line.separator"));
expCmd = super.expandCommand(expCmd);
return expCmd;
}
@Override
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
}
@Test
@Override
public void testAll() {
super.testAll();
}
}
@ -17,15 +17,21 @@
*/
package org.apache.hadoop.fs.permission;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -33,8 +39,12 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestStickyBit {
@ -43,56 +53,89 @@ public class TestStickyBit {
UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
static UserGroupInformation user2 =
UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem hdfs;
private static FileSystem hdfsAsUser1;
private static FileSystem hdfsAsUser2;
@BeforeClass
public static void init() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
initCluster(true);
}
private static void initCluster(boolean format) throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
.build();
hdfs = cluster.getFileSystem();
assertTrue(hdfs instanceof DistributedFileSystem);
hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
@Before
public void setup() throws Exception {
if (hdfs != null) {
for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
hdfs.delete(stat.getPath(), true);
}
}
}
@AfterClass
public static void shutdown() throws Exception {
IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2);
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Ensure that even if a file is in a directory with the sticky bit on,
* another user can write to that file (assuming correct permissions).
*/
private void confirmCanAppend(Configuration conf, FileSystem hdfs,
Path baseDir) throws IOException, InterruptedException {
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
private void confirmCanAppend(Configuration conf, Path p) throws Exception {
// Write a file to the new tmp directory as a regular user
hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
Path file = new Path(p, "foo");
writeFile(hdfs, file);
hdfs.setPermission(file, new FsPermission((short) 0777));
writeFile(hdfsAsUser1, file);
hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));
// Log onto cluster as another user and attempt to append to file
hdfs = DFSTestUtil.getFileSystemAs(user2, conf);
Path file2 = new Path(p, "foo");
FSDataOutputStream h = hdfs.append(file2);
h.write("Some more data".getBytes());
h.close();
FSDataOutputStream h = null;
try {
h = hdfsAsUser2.append(file2);
h.write("Some more data".getBytes());
h.close();
h = null;
} finally {
IOUtils.cleanup(null, h);
}
}
/**
* Test that one user can't delete another user's file when the sticky bit is
* set.
*/
private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
Path baseDir) throws IOException, InterruptedException {
Path p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
private void confirmDeletingFiles(Configuration conf, Path p)
throws Exception {
// Write a file to the new temp directory as a regular user
hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
Path file = new Path(p, "foo");
writeFile(hdfs, file);
writeFile(hdfsAsUser1, file);
// Make sure the correct user is the owner
assertEquals(user1.getShortUserName(), hdfs.getFileStatus(file).getOwner());
assertEquals(user1.getShortUserName(),
hdfsAsUser1.getFileStatus(file).getOwner());
// Log onto cluster as another user and attempt to delete the file
FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf);
try {
hdfs2.delete(file, false);
hdfsAsUser2.delete(file, false);
fail("Shouldn't be able to delete someone else's file with SB on");
} catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException);
@ -105,13 +148,8 @@ private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
* on, the new directory does not automatically get a sticky bit, as is
* standard Unix behavior
*/
private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir)
private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path p)
throws IOException {
Path p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
// Create a subdirectory within it
Path p2 = new Path(p, "bar");
hdfs.mkdirs(p2);
@ -123,23 +161,19 @@ private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir)
/**
* Test basic ability to get and set sticky bits on files and directories.
*/
private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir)
private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
throws IOException {
Path p1 = new Path(baseDir, "roguetraders");
hdfs.mkdirs(p1);
// Initially sticky bit should not be set
assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit());
assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());
// Same permission, but with sticky bit on
short withSB;
withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000);
withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);
assertTrue((new FsPermission(withSB)).getStickyBit());
hdfs.setPermission(p1, new FsPermission(withSB));
assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit());
hdfs.setPermission(p, new FsPermission(withSB));
assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());
// Write a file to the fs, try to set its sticky bit
Path f = new Path(baseDir, "somefile");
@ -154,37 +188,78 @@ private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir)
}
@Test
public void testGeneralSBBehavior() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
public void testGeneralSBBehavior() throws Exception {
Path baseDir = new Path("/mcgann");
hdfs.mkdirs(baseDir);
FileSystem hdfs = cluster.getFileSystem();
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
assertTrue(hdfs instanceof DistributedFileSystem);
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
Path baseDir = new Path("/mcgann");
hdfs.mkdirs(baseDir);
confirmCanAppend(conf, hdfs, baseDir);
confirmCanAppend(conf, p);
baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir);
confirmSettingAndGetting(hdfs, baseDir);
baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "roguetraders");
baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir);
confirmDeletingFiles(conf, hdfs, baseDir);
hdfs.mkdirs(p);
confirmSettingAndGetting(hdfs, p, baseDir);
baseDir = new Path("/smith");
hdfs.mkdirs(baseDir);
confirmStickyBitDoesntPropagate(hdfs, baseDir);
baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
confirmDeletingFiles(conf, p);
} finally {
if (cluster != null)
cluster.shutdown();
}
baseDir = new Path("/smith");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
confirmStickyBitDoesntPropagate(hdfs, baseDir);
}
@Test
public void testAclGeneralSBBehavior() throws Exception {
Path baseDir = new Path("/mcgann");
hdfs.mkdirs(baseDir);
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
applyAcl(p);
confirmCanAppend(conf, p);
baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "roguetraders");
hdfs.mkdirs(p);
applyAcl(p);
confirmSettingAndGetting(hdfs, p, baseDir);
baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
applyAcl(p);
confirmDeletingFiles(conf, p);
baseDir = new Path("/smith");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
applyAcl(p);
confirmStickyBitDoesntPropagate(hdfs, p);
}
/**
@ -192,46 +267,42 @@ public void testGeneralSBBehavior() throws IOException, InterruptedException {
* bit is set.
*/
@Test
public void testMovingFiles() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
public void testMovingFiles() throws Exception {
testMovingFiles(false);
}
@Test
public void testAclMovingFiles() throws Exception {
testMovingFiles(true);
}
private void testMovingFiles(boolean useAcl) throws Exception {
// Create a tmp directory with wide-open permissions and sticky bit
Path tmpPath = new Path("/tmp");
Path tmpPath2 = new Path("/tmp2");
hdfs.mkdirs(tmpPath);
hdfs.mkdirs(tmpPath2);
hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
if (useAcl) {
applyAcl(tmpPath);
}
hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
if (useAcl) {
applyAcl(tmpPath2);
}
// Write a file to the new tmp directory as a regular user
Path file = new Path(tmpPath, "foo");
writeFile(hdfsAsUser1, file);
// Log onto cluster as another user and attempt to move the file
try {
// Set up cluster for testing
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem hdfs = cluster.getFileSystem();
assertTrue(hdfs instanceof DistributedFileSystem);
// Create a tmp directory with wide-open permissions and sticky bit
Path tmpPath = new Path("/tmp");
Path tmpPath2 = new Path("/tmp2");
hdfs.mkdirs(tmpPath);
hdfs.mkdirs(tmpPath2);
hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
// Write a file to the new tmp directory as a regular user
Path file = new Path(tmpPath, "foo");
FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf);
writeFile(hdfs2, file);
// Log onto cluster as another user and attempt to move the file
FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
try {
hdfs3.rename(file, new Path(tmpPath2, "renamed"));
fail("Shouldn't be able to rename someone else's file with SB on");
} catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException);
assertTrue(ioe.getMessage().contains("sticky bit"));
}
} finally {
if (cluster != null)
cluster.shutdown();
hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
fail("Shouldn't be able to rename someone else's file with SB on");
} catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException);
assertTrue(ioe.getMessage().contains("sticky bit"));
}
}
@ -241,56 +312,91 @@ public void testMovingFiles() throws IOException, InterruptedException {
* re-start.
*/
@Test
public void testStickyBitPersistence() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem hdfs = cluster.getFileSystem();
public void testStickyBitPersistence() throws Exception {
// A tale of three directories...
Path sbSet = new Path("/Housemartins");
Path sbNotSpecified = new Path("/INXS");
Path sbSetOff = new Path("/Easyworld");
assertTrue(hdfs instanceof DistributedFileSystem);
for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
hdfs.mkdirs(p);
// A tale of three directories...
Path sbSet = new Path("/Housemartins");
Path sbNotSpecified = new Path("/INXS");
Path sbSetOff = new Path("/Easyworld");
// Two directories had their sticky bits set explicitly...
hdfs.setPermission(sbSet, new FsPermission((short) 01777));
hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
hdfs.mkdirs(p);
shutdown();
// Two directories had their sticky bits set explicitly...
hdfs.setPermission(sbSet, new FsPermission((short) 01777));
hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
// Start file system up again
initCluster(false);
cluster.shutdown();
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
// Start file system up again
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
hdfs = cluster.getFileSystem();
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
.getStickyBit());
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
}
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
.getStickyBit());
@Test
public void testAclStickyBitPersistence() throws Exception {
// A tale of three directories...
Path sbSet = new Path("/Housemartins");
Path sbNotSpecified = new Path("/INXS");
Path sbSetOff = new Path("/Easyworld");
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
hdfs.mkdirs(p);
} finally {
if (cluster != null)
cluster.shutdown();
}
// Two directories had their sticky bits set explicitly...
hdfs.setPermission(sbSet, new FsPermission((short) 01777));
applyAcl(sbSet);
hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
applyAcl(sbSetOff);
shutdown();
// Start file system up again
initCluster(false);
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
.getStickyBit());
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
}
/**
* Write a quick file to the specified file system at specified path
*/
static private void writeFile(FileSystem hdfs, Path p) throws IOException {
FSDataOutputStream o = hdfs.create(p);
o.write("some file contents".getBytes());
o.close();
FSDataOutputStream o = null;
try {
o = hdfs.create(p);
o.write("some file contents".getBytes());
o.close();
o = null;
} finally {
IOUtils.cleanup(null, o);
}
}
/**
* Applies an ACL (both access and default) to the given path.
*
* @param p Path to set
* @throws IOException if an ACL could not be modified
*/
private static void applyAcl(Path p) throws IOException {
hdfs.modifyAclEntries(p, Arrays.asList(
aclEntry(ACCESS, USER, user2.getShortUserName(), ALL),
aclEntry(DEFAULT, USER, user2.getShortUserName(), ALL)));
}
}
@ -20,6 +20,7 @@
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
@ -29,6 +30,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@ -1095,6 +1097,8 @@ public static void runOperations(MiniDFSCluster cluster,
filesystem.removeCacheDirective(id);
// OP_REMOVE_CACHE_POOL
filesystem.removeCachePool("pool1");
// OP_SET_ACL
filesystem.setAcl(pathConcatTarget, Lists.<AclEntry> newArrayList());
}
public static void abortStream(DFSOutputStream out) throws IOException {
@ -31,6 +31,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@ -65,6 +66,7 @@ public class TestSafeMode {
public void startUp() throws IOException {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
@ -328,12 +330,48 @@ public void run(FileSystem fs) throws IOException {
fs.setTimes(file1, 0, 0);
}});
runFsFun("modifyAclEntries while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
}});
runFsFun("removeAclEntries while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
}});
runFsFun("removeDefaultAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeDefaultAcl(file1);
}});
runFsFun("removeAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeAcl(file1);
}});
runFsFun("setAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.setAcl(file1, Lists.<AclEntry>newArrayList());
}});
try {
DFSTestUtil.readFile(fs, file1);
} catch (IOException ioe) {
fail("Set times failed while in SM");
}
try {
fs.getAclStatus(file1);
} catch (IOException ioe) {
fail("getAclStatus failed while in SM");
}
assertFalse("Could not leave SM",
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
@ -26,6 +26,11 @@
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
@ -68,6 +73,7 @@
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.base.Joiner;
@ -580,4 +586,39 @@ public void testChecksumTypeProto() {
assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),
HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
}
@Test
public void testAclEntryProto() {
// All fields populated.
AclEntry e1 = new AclEntry.Builder().setName("test")
.setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
.setType(AclEntryType.OTHER).build();
// No name.
AclEntry e2 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setPermission(FsAction.ALL).build();
// No permission, which will default to the 0'th enum element.
AclEntry e3 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setName("test").build();
AclEntry[] expected = new AclEntry[] { e1, e2,
new AclEntry.Builder()
.setScope(e3.getScope())
.setType(e3.getType())
.setName(e3.getName())
.setPermission(FsAction.NONE)
.build() };
AclEntry[] actual = Lists.newArrayList(
PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists
.newArrayList(e1, e2, e3)))).toArray(new AclEntry[0]);
Assert.assertArrayEquals(expected, actual);
}
@Test
public void testAclStatusProto() {
AclEntry e = new AclEntry.Builder().setName("test")
.setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
.setType(AclEntryType.OTHER).build();
AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e)
.build();
Assert.assertEquals(s, PBHelper.convert(PBHelper.convert(s)));
}
}
@ -0,0 +1,155 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Helper methods useful for writing ACL tests.
*/
public final class AclTestHelpers {
/**
* Create a new AclEntry with scope, type and permission (no name).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param permission FsAction set of permissions in the ACL entry
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
FsAction permission) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setPermission(permission)
.build();
}
/**
* Create a new AclEntry with scope, type, name and permission.
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @param permission FsAction set of permissions in the ACL entry
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
String name, FsAction permission) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setName(name)
.setPermission(permission)
.build();
}
/**
* Create a new AclEntry with scope, type and name (no permission).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
String name) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setName(name)
.build();
}
/**
* Create a new AclEntry with scope and type (no name or permission).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.build();
}
/**
* Asserts that permission is denied to the given fs/user for the given file.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path file to check
* @throws Exception if there is an unexpected error
*/
public static void assertFilePermissionDenied(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
DFSTestUtil.readFileBuffer(fs, pathToCheck);
fail("expected AccessControlException for user " + user + ", path = " +
pathToCheck);
} catch (AccessControlException e) {
// expected
}
}
/**
* Asserts that permission is granted to the given fs/user for the given file.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path file to check
* @throws Exception if there is an unexpected error
*/
public static void assertFilePermissionGranted(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
DFSTestUtil.readFileBuffer(fs, pathToCheck);
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);
}
}
/**
* Asserts the value of the FsPermission bits on the inode of a specific path.
*
* @param fs FileSystem to use for check
* @param pathToCheck Path inode to check
* @param perm short expected permission bits
* @throws IOException thrown if there is an I/O error
*/
public static void assertPermission(FileSystem fs, Path pathToCheck,
short perm) throws IOException {
assertEquals(perm, fs.getFileStatus(pathToCheck).getPermission().toShort());
}
}
@ -96,6 +96,7 @@ public void startCluster(String dfsDir) throws IOException {
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
config.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster =
new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
cluster.waitClusterUp();
@ -0,0 +1,189 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.collect.Lists;
/**
* Tests that the configuration flag that controls support for ACLs is off by
* default and causes all attempted operations related to ACLs to fail. The
* NameNode can still load ACLs from fsimage or edits.
*/
public class TestAclConfigFlag {
private static final Path PATH = new Path("/path");
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
@Rule
public ExpectedException exception = ExpectedException.none();
@After
public void shutdown() throws Exception {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testModifyAclEntries() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.modifyAclEntries(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testRemoveAclEntries() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeAclEntries(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testRemoveDefaultAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeDefaultAcl(PATH);
}
@Test
public void testRemoveAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeAcl(PATH);
}
@Test
public void testSetAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testGetAclStatus() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.getAclStatus(PATH);
}
@Test
public void testEditLog() throws Exception {
// With ACLs enabled, set an ACL.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
// Restart with ACLs disabled. Expect successful restart.
restart(false, false);
}
@Test
public void testFsImage() throws Exception {
// With ACLs enabled, set an ACL.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
// Save a new checkpoint and restart with ACLs still enabled.
restart(true, true);
// Restart with ACLs disabled. Expect successful restart.
restart(false, false);
}
/**
* We expect an AclException, and we want the exception text to state the
* configuration key that controls ACL support.
*/
private void expectException() {
exception.expect(AclException.class);
exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY);
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @param aclsEnabled if true, ACL support is enabled
* @throws Exception if any step fails
*/
private void initCluster(boolean format, boolean aclsEnabled)
throws Exception {
Configuration conf = new Configuration();
// not explicitly setting to false, should be false by default
if (aclsEnabled) {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
}
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @param aclsEnabled if true, ACL support is enabled
* @throws Exception if restart fails
*/
private void restart(boolean checkpoint, boolean aclsEnabled)
throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false, aclsEnabled);
}
}
@ -0,0 +1,227 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Lists;
public class TestFSImageWithAcl {
private static Configuration conf;
private static MiniDFSCluster cluster;
@BeforeClass
public static void setUp() throws IOException {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
private void testAcl(boolean persistNamespace) throws IOException {
Path p = new Path("/p");
DistributedFileSystem fs = cluster.getFileSystem();
fs.create(p).close();
fs.mkdirs(new Path("/23"));
AclEntry e = new AclEntry.Builder().setName("foo")
.setPermission(READ_EXECUTE).setScope(ACCESS).setType(USER).build();
fs.modifyAclEntries(p, Lists.newArrayList(e));
restart(fs, persistNamespace);
AclStatus s = cluster.getNamesystem().getAclStatus(p.toString());
AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(
new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ) }, returned);
fs.removeAcl(p);
if (persistNamespace) {
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
cluster.waitActive();
s = cluster.getNamesystem().getAclStatus(p.toString());
returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] { }, returned);
fs.modifyAclEntries(p, Lists.newArrayList(e));
s = cluster.getNamesystem().getAclStatus(p.toString());
returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ) }, returned);
}
@Test
public void testPersistAcl() throws IOException {
testAcl(true);
}
@Test
public void testAclEditLog() throws IOException {
testAcl(false);
}
private void doTestDefaultAclNewChildren(boolean persistNamespace)
throws IOException {
Path dirPath = new Path("/dir");
Path filePath = new Path(dirPath, "file1");
Path subdirPath = new Path(dirPath, "subdir1");
DistributedFileSystem fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(dirPath, aclSpec);
fs.create(filePath).close();
fs.mkdirs(subdirPath);
AclEntry[] fileExpected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
AclEntry[] subdirExpected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE));
fs.modifyAclEntries(dirPath, aclSpec);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
fs.removeAcl(dirPath);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
}
@Test
public void testFsImageDefaultAclNewChildren() throws IOException {
doTestDefaultAclNewChildren(true);
}
@Test
public void testEditLogDefaultAclNewChildren() throws IOException {
doTestDefaultAclNewChildren(false);
}
/**
* Restart the NameNode, optionally saving a new checkpoint.
*
* @param fs DistributedFileSystem used for saving namespace
* @param persistNamespace boolean true to save a new checkpoint
* @throws IOException if restart fails
*/
private void restart(DistributedFileSystem fs, boolean persistNamespace)
throws IOException {
if (persistNamespace) {
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
cluster.waitActive();
}
}
@ -0,0 +1,417 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.Arrays;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Unit tests covering FSPermissionChecker. All tests in this suite have been
* cross-validated against Linux setfacl/getfacl to check for consistency of the
* HDFS implementation.
*/
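// For reference, the Linux cross-validation mentioned above uses the standard
// tools roughly as follows (illustrative commands, not executed by this suite):
//
//   setfacl -m user:diana:r-- file1
//   getfacl file1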
public class TestFSPermissionChecker {
private static final long PREFERRED_BLOCK_SIZE = 128 * 1024 * 1024;
private static final short REPLICATION = 3;
private static final String SUPERGROUP = "supergroup";
private static final String SUPERUSER = "superuser";
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { "sales" });
private static final UserGroupInformation CLARK =
UserGroupInformation.createUserForTesting("clark", new String[] { "execs" });
private INodeDirectory inodeRoot;
@Before
public void setUp() {
PermissionStatus permStatus = PermissionStatus.createImmutable(SUPERUSER,
SUPERGROUP, FsPermission.createImmutable((short)0755));
inodeRoot = new INodeDirectory(INodeId.ROOT_INODE_ID,
INodeDirectory.ROOT_NAME, permStatus, 0L);
}
@Test
public void testAclOwner() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(BRUCE, "/file1", READ);
assertPermissionGranted(BRUCE, "/file1", WRITE);
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionDenied(BRUCE, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
}
@Test
public void testAclNamedUser() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclNamedUserDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", READ);
}
@Test
public void testAclNamedUserTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", NONE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, MASK, READ_EXECUTE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", ALL);
}
@Test
public void testAclNamedUserMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0620);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, WRITE),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclGroup() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclGroupDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0604);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, MASK, NONE),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclGroupTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, MASK, NONE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", ALL);
}
@Test
public void testAclGroupTraverseDenyOnlyDefaultEntries() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, "sales", NONE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", ALL);
}
@Test
public void testAclGroupMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclNamedGroup() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclNamedGroupDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "execs", NONE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclNamedGroupTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", NONE),
aclEntry(ACCESS, MASK, READ_EXECUTE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", ALL);
}
@Test
public void testAclNamedGroupMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", READ_WRITE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclOther() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0774);
addAcl(inodeFile,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", ALL);
assertPermissionGranted(DIANA, "/file1", ALL);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
private void addAcl(INodeWithAdditionalFields inode, AclEntry... acl)
throws IOException {
    AclStorage.updateINodeAcl(inode, Arrays.asList(acl),
      Snapshot.CURRENT_STATE_ID);
}
private void assertPermissionGranted(UserGroupInformation user, String path,
FsAction access) throws IOException {
new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
inodeRoot, false, null, null, access, null, true);
}
private void assertPermissionDenied(UserGroupInformation user, String path,
FsAction access) throws IOException {
try {
new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
inodeRoot, false, null, null, access, null, true);
fail("expected AccessControlException for user + " + user + ", path = " +
path + ", access = " + access);
} catch (AccessControlException e) {
// expected
}
}
private static INodeDirectory createINodeDirectory(INodeDirectory parent,
String name, String owner, String group, short perm) throws IOException {
PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
FsPermission.createImmutable(perm));
INodeDirectory inodeDirectory = new INodeDirectory(
INodeId.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L);
parent.addChild(inodeDirectory);
return inodeDirectory;
}
private static INodeFile createINodeFile(INodeDirectory parent, String name,
String owner, String group, short perm) throws IOException {
PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
FsPermission.createImmutable(perm));
INodeFile inodeFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
PREFERRED_BLOCK_SIZE);
parent.addChild(inodeFile);
return inodeFile;
}
}
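The group and mask tests above all reduce to one POSIX ACL rule: the effective
rights of a named-user, named-group, or unnamed-group entry are the
intersection of that entry's permissions with the mask entry. A minimal,
hedged sketch of that rule using only the public FsAction type (the class and
method below are illustrative, not part of this patch):

import org.apache.hadoop.fs.permission.FsAction;

public class EffectiveAclSketch {
  // Group-class entries are capped by the mask before the access check,
  // which is why testAclGroupMask grants CLARK read but denies write.
  static FsAction effective(FsAction entryPerm, FsAction mask) {
    return entryPerm.and(mask);
  }

  public static void main(String[] args) {
    // Group entry rw- under mask r-- yields r--.
    System.out.println(effective(FsAction.READ_WRITE, FsAction.READ).SYMBOL);
  }
}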

View File

@ -0,0 +1,38 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.BeforeClass;
/**
* Tests NameNode interaction for all ACL modification APIs. This test suite
* also covers interaction of setPermission with inodes that have ACLs.
*/
public class TestNameNodeAcl extends FSAclBaseTest {
@BeforeClass
public static void init() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
}
}
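FSAclBaseTest, which this class parameterizes, drives the full ACL API against
the cluster configured above. As a hedged sketch of the client-side calls it
covers, assuming a running cluster with dfs.namenode.acls.enabled set to true
and the static aclEntry imports used elsewhere in this patch (the path is
illustrative):

FileSystem fs = cluster.getFileSystem();
Path p = new Path("/aclDemo");                        // hypothetical path
fs.mkdirs(p);
fs.modifyAclEntries(p, Lists.newArrayList(            // add a named entry
    aclEntry(ACCESS, USER, "bruce", READ_EXECUTE)));
AclStatus status = fs.getAclStatus(p);                // read it back
fs.removeAcl(p);                                      // drop extended entries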

View File

@ -91,6 +91,7 @@ public void setup() throws Exception {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
namesystem = cluster.getNamesystem();

View File

@ -125,6 +125,7 @@ public void setup() throws Exception {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(DataNodes).build();

View File

@ -0,0 +1,741 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.collect.Lists;
/**
* Tests interaction of ACLs with snapshots.
*/
public class TestAclWithSnapshot {
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { });
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem fsAsBruce, fsAsDiana;
private static DistributedFileSystem hdfs;
private static int pathCount = 0;
private static Path path, snapshotPath;
private static String snapshotName;
@Rule
public ExpectedException exception = ExpectedException.none();
@BeforeClass
public static void init() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
initCluster(true);
}
@AfterClass
public static void shutdown() throws Exception {
IOUtils.cleanup(null, hdfs, fsAsBruce, fsAsDiana);
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void setUp() {
++pathCount;
path = new Path("/p" + pathCount);
snapshotName = "snapshot" + pathCount;
snapshotPath = new Path(path, new Path(".snapshot", snapshotName));
}
@Test
public void testOriginalAclEnforcedForSnapshotRootAfterChange()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, snapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
// Original has changed, but snapshot still has old ACL.
doSnapshotRootChangeAssertions(path, snapshotPath);
restart(false);
doSnapshotRootChangeAssertions(path, snapshotPath);
restart(true);
doSnapshotRootChangeAssertions(path, snapshotPath);
}
private static void doSnapshotRootChangeAssertions(Path path,
Path snapshotPath) throws Exception {
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0550, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, snapshotPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionGranted(fsAsDiana, DIANA, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotContentsAfterChange()
throws Exception {
Path filePath = new Path(path, "file1");
Path subdirPath = new Path(path, "subdir1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
(short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirPath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
// Original has changed, but snapshot still has old ACL.
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(false);
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(true);
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
}
private static void doSnapshotContentsChangeAssertions(Path filePath,
Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath)
throws Exception {
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0570, filePath);
assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
assertFilePermissionGranted(fsAsDiana, DIANA, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0570, subdirPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionGranted(fsAsDiana, DIANA, subdirPath);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotRootAfterRemoval()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, snapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
hdfs.removeAcl(path);
// Original has changed, but snapshot still has old ACL.
doSnapshotRootRemovalAssertions(path, snapshotPath);
restart(false);
doSnapshotRootRemovalAssertions(path, snapshotPath);
restart(true);
doSnapshotRootRemovalAssertions(path, snapshotPath);
}
private static void doSnapshotRootRemovalAssertions(Path path,
Path snapshotPath) throws Exception {
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0700, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, snapshotPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval()
throws Exception {
Path filePath = new Path(path, "file1");
Path subdirPath = new Path(path, "subdir1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
(short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirPath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
hdfs.removeAcl(filePath);
hdfs.removeAcl(subdirPath);
// Original has changed, but snapshot still has old ACL.
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(false);
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(true);
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
}
private static void doSnapshotContentsRemovalAssertions(Path filePath,
Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath)
throws Exception {
AclEntry[] expected = new AclEntry[] { };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0500, filePath);
assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0500, subdirPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
}
@Test
public void testModifyReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL));
hdfs.modifyAclEntries(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
hdfs.modifyAclEntries(path, aclSpec);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0770, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionGranted(fsAsDiana, DIANA, path);
}
@Test
public void testRemoveReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL));
hdfs.modifyAclEntries(path, aclSpec);
hdfs.removeAcl(path);
AclEntry[] expected = new AclEntry[] { };
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0700, path);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
}
@Test
public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
hdfs.modifyAclEntries(path, aclSpec);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)0700, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)0700, snapshotPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, snapshotPath);
}
@Test
public void testModifyAclEntriesSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
exception.expect(SnapshotAccessControlException.class);
hdfs.modifyAclEntries(snapshotPath, aclSpec);
}
@Test
public void testRemoveAclEntriesSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce"));
exception.expect(SnapshotAccessControlException.class);
hdfs.removeAclEntries(snapshotPath, aclSpec);
}
@Test
public void testRemoveDefaultAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
exception.expect(SnapshotAccessControlException.class);
hdfs.removeDefaultAcl(snapshotPath);
}
@Test
public void testRemoveAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
exception.expect(SnapshotAccessControlException.class);
hdfs.removeAcl(snapshotPath);
}
@Test
public void testSetAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce"));
exception.expect(SnapshotAccessControlException.class);
hdfs.setAcl(snapshotPath, aclSpec);
}
@Test
public void testChangeAclExceedsQuota() throws Exception {
Path filePath = new Path(path, "file1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE));
hdfs.modifyAclEntries(filePath, aclSpec);
hdfs.createSnapshot(path, snapshotName);
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, filePath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0660, fileSnapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ));
exception.expect(NSQuotaExceededException.class);
hdfs.modifyAclEntries(filePath, aclSpec);
}
@Test
public void testRemoveAclExceedsQuota() throws Exception {
Path filePath = new Path(path, "file1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE));
hdfs.modifyAclEntries(filePath, aclSpec);
hdfs.createSnapshot(path, snapshotName);
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, filePath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)0660, fileSnapshotPath);
exception.expect(NSQuotaExceededException.class);
hdfs.removeAcl(filePath);
}
@Test
public void testGetAclStatusDotSnapshotPath() throws Exception {
hdfs.mkdirs(path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
AclStatus s = hdfs.getAclStatus(new Path(path, ".snapshot"));
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
}
/**
* Asserts that permission is denied to the given fs/user for the given
* directory.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path directory to check
* @throws Exception if there is an unexpected error
*/
private static void assertDirPermissionDenied(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
fail("expected AccessControlException for user " + user + ", path = " +
pathToCheck);
} catch (AccessControlException e) {
// expected
}
}
/**
* Asserts that permission is granted to the given fs/user for the given
* directory.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path directory to check
* @throws Exception if there is an unexpected error
*/
private static void assertDirPermissionGranted(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);
}
}
/**
* Asserts the value of the FsPermission bits on the inode of the test path.
*
* @param perm short expected permission bits
* @param pathToCheck Path to check
* @throws Exception thrown if there is an unexpected error
*/
private static void assertPermission(short perm, Path pathToCheck)
throws Exception {
AclTestHelpers.assertPermission(hdfs, pathToCheck, perm);
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem
* instances for our test users.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @throws Exception if any step fails
*/
private static void initCluster(boolean format) throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @throws Exception if restart fails
*/
private static void restart(boolean checkpoint) throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false);
}
}
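The recurring pattern in these tests is copy-on-write for ACLs: creating a
snapshot pins the ACL as of that moment, and later setAcl, modifyAclEntries,
or removeAcl calls on the live path leave the .snapshot view untouched.
Condensed into a hedged sketch (the directory and the two aclSpec lists are
illustrative):

hdfs.setAcl(dir, aclSpecV1);                              // live ACL is V1
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");       // pins V1
hdfs.setAcl(dir, aclSpecV2);                              // live ACL is V2
AclStatus live = hdfs.getAclStatus(dir);                  // reports V2
AclStatus snap =
    hdfs.getAclStatus(new Path(dir, ".snapshot/s1"));     // still reports V1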

View File

@ -305,7 +305,8 @@ static void modify(INode inode, final List<INode> current,
final int i = Diff.search(current, inode.getKey());
Assert.assertTrue(i >= 0);
final INodeDirectory oldinode = (INodeDirectory)current.get(i);
final INodeDirectory newinode = new INodeDirectory(oldinode, false, true);
final INodeDirectory newinode = new INodeDirectory(oldinode, false,
oldinode.getFeatures());
newinode.setModificationTime(oldinode.getModificationTime() + 1);
current.set(i, newinode);

View File

@ -17,11 +17,19 @@
*/
package org.apache.hadoop.hdfs.web;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -32,6 +40,8 @@
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
import com.google.common.collect.Lists;
public class TestJsonUtil {
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
@ -135,6 +145,47 @@ public void testToDatanodeInfoWithName() throws Exception {
response.put("ipAddr", "127.0.0.1");
checkDecodeFailure(response);
}
@Test
public void testToAclStatus() {
String jsonString =
"{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
List<AclEntry> aclSpec =
Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "user1", READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
aclStatusBuilder.owner("testuser");
aclStatusBuilder.group("supergroup");
aclStatusBuilder.addEntries(aclSpec);
aclStatusBuilder.stickyBit(false);
Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
JsonUtil.toAclStatus(json));
}
@Test
public void testToJsonFromAclStatus() {
String jsonString =
"{\"AclStatus\":{\"entries\":[\"user:user1:rwx\",\"group::rw-\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
aclStatusBuilder.owner("testuser");
aclStatusBuilder.group("supergroup");
aclStatusBuilder.stickyBit(false);
List<AclEntry> aclSpec =
Lists.newArrayList(aclEntry(ACCESS, USER,"user1", ALL),
aclEntry(ACCESS, GROUP, READ_WRITE));
aclStatusBuilder.addEntries(aclSpec);
Assert.assertEquals(jsonString,
JsonUtil.toJsonString(aclStatusBuilder.build()));
}
private void checkDecodeFailure(Map<String, Object> map) {
try {

View File

@ -0,0 +1,76 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
/**
* Tests ACL APIs via WebHDFS.
*/
public class TestWebHDFSAcl extends FSAclBaseTest {
@BeforeClass
public static void init() throws Exception {
conf = WebHdfsTestUtil.createConf();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
}
/**
* We need to skip this test on WebHDFS, because WebHDFS currently cannot
* resolve symlinks.
*/
@Override
@Test
@Ignore
public void testDefaultAclNewSymlinkIntermediate() {
}
/**
* Overridden to provide a WebHdfsFileSystem wrapper for the super-user.
*
* @return WebHdfsFileSystem for super-user
* @throws Exception if creation fails
*/
@Override
protected WebHdfsFileSystem createFileSystem() throws Exception {
return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
}
/**
* Overridden to provide a WebHdfsFileSystem wrapper for a specific user.
*
* @param user UserGroupInformation specific user
* @return WebHdfsFileSystem for specific user
* @throws Exception if creation fails
*/
@Override
protected WebHdfsFileSystem createFileSystem(UserGroupInformation user)
throws Exception {
return WebHdfsTestUtil.getWebHdfsFileSystemAs(user, conf,
WebHdfsFileSystem.SCHEME);
}
}
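Because FSAclBaseTest is reused verbatim, the only change here is the
transport: every ACL call travels over the WebHDFS REST interface. A hedged
example of the same call path through the wrapper this class creates (the
path and entry are illustrative):

WebHdfsFileSystem webhdfs =
    WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
webhdfs.modifyAclEntries(new Path("/aclDemo"),
    Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", ALL)));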

View File

@ -21,12 +21,14 @@
import static org.junit.Assert.assertNull;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.util.StringUtils;
@ -300,4 +302,48 @@ public void testUserNameOkAfterResettingPattern() {
UserParam.setUserPatternDomain(oldDomain);
}
@Test
public void testAclPermissionParam() {
final AclPermissionParam p =
new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx");
List<AclEntry> setAclList =
AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",
true);
Assert.assertEquals(setAclList.toString(), p.getAclPermission(true)
.toString());
new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
try {
new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
new AclPermissionParam(
"user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx");
try {
new AclPermissionParam("user:r-,group:rwx,other:rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
}
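testAclPermissionParam exercises the aclspec grammar: comma-separated entries
of the form [default:]type:[name]:perm, where perm must be a strict rwx
triple. A hedged illustration of the accept/reject boundary (the spec strings
are made up for the example):

// Accepted: well-formed entries, including a named user and a mask.
List<AclEntry> ok = AclEntry.parseAclSpec(
    "user::rwx,user:bruce:rw-,group::r--,mask::rw-,other::---", true);
// Rejected: a four-character permission field fails validation.
new AclPermissionParam("user::rw--");  // throws IllegalArgumentException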

View File

@ -17,6 +17,10 @@
*/
package org.apache.hadoop.security;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@ -24,6 +28,7 @@
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -66,6 +71,7 @@ public class TestPermissionSymlinks {
@BeforeClass
public static void beforeClassSetUp() throws Exception {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.set(FsPermission.UMASK_LABEL, "000");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
@ -101,8 +107,43 @@ public void tearDown() throws Exception {
@Test(timeout = 5000)
public void testDelete() throws Exception {
// Try to delete where the symlink's parent dir is not writable
fs.setPermission(linkParent, new FsPermission((short) 0555));
doDeleteLinkParentNotWritable();
fs.setPermission(linkParent, new FsPermission((short) 0777));
fs.setPermission(targetParent, new FsPermission((short) 0555));
fs.setPermission(target, new FsPermission((short) 0555));
doDeleteTargetParentAndTargetNotWritable();
}
@Test
public void testAclDelete() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doDeleteLinkParentNotWritable();
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doDeleteTargetParentAndTargetNotWritable();
}
private void doDeleteLinkParentNotWritable() throws Exception {
// Try to delete where the symlink's parent dir is not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -116,11 +157,11 @@ public Object run() throws IOException {
} catch (AccessControlException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
private void doDeleteTargetParentAndTargetNotWritable() throws Exception {
// Try a delete where the symlink parent dir is writable,
// but the target's parent and target are not
fs.setPermission(linkParent, new FsPermission((short) 0777));
fs.setPermission(targetParent, new FsPermission((short) 0555));
fs.setPermission(target, new FsPermission((short) 0555));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
@ -139,6 +180,20 @@ public Object run() throws IOException {
@Test(timeout = 5000)
public void testReadWhenTargetNotReadable() throws Exception {
fs.setPermission(target, new FsPermission((short) 0000));
doReadTargetNotReadable();
}
@Test
public void testAclReadTargetNotReadable() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, user.getUserName(), NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ)));
doReadTargetNotReadable();
}
private void doReadTargetNotReadable() throws Exception {
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -157,8 +212,22 @@ public Object run() throws IOException {
@Test(timeout = 5000)
public void testFileStatus() throws Exception {
// Try to getFileLinkStatus the link when the target is not readable
fs.setPermission(target, new FsPermission((short) 0000));
doGetFileLinkStatusTargetNotReadable();
}
@Test
public void testAclGetFileLinkStatusTargetNotReadable() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, user.getUserName(), NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ)));
doGetFileLinkStatusTargetNotReadable();
}
private void doGetFileLinkStatusTargetNotReadable() throws Exception {
// Try to getFileLinkStatus the link when the target is not readable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
@ -176,9 +245,28 @@ public Object run() throws IOException {
@Test(timeout = 5000)
public void testRenameLinkTargetNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
fs.setPermission(target, new FsPermission((short) 0555));
fs.setPermission(targetParent, new FsPermission((short) 0555));
doRenameLinkTargetNotWritableFC();
}
@Test
public void testAclRenameTargetNotWritableFC() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameLinkTargetNotWritableFC();
}
private void doRenameLinkTargetNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
@ -197,8 +285,22 @@ public Object run() throws IOException {
@Test(timeout = 5000)
public void testRenameSrcNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
fs.setPermission(linkParent, new FsPermission((short) 0555));
doRenameSrcNotWritableFC();
}
@Test
public void testAclRenameSrcNotWritableFC() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameSrcNotWritableFC();
}
private void doRenameSrcNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -220,9 +322,28 @@ public Object run() throws IOException {
@Test(timeout = 5000)
public void testRenameLinkTargetNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
fs.setPermission(target, new FsPermission((short) 0555));
fs.setPermission(targetParent, new FsPermission((short) 0555));
doRenameLinkTargetNotWritableFS();
}
@Test
public void testAclRenameTargetNotWritableFS() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameLinkTargetNotWritableFS();
}
private void doRenameLinkTargetNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
@ -241,8 +362,22 @@ public Object run() throws IOException {
@Test(timeout = 5000)
public void testRenameSrcNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
fs.setPermission(linkParent, new FsPermission((short) 0555));
doRenameSrcNotWritableFS();
}
@Test
public void testAclRenameSrcNotWritableFS() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameSrcNotWritableFS();
}
private void doRenameSrcNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -258,6 +393,4 @@ public Object run() throws IOException {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
}
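The ACL variants above all use the same trick: leave the base user, group, and
other entries wide open, then deny one user through a more specific named
entry, because the permission checker matches the most specific applicable
entry class first. A minimal, hedged model of that matching order (the types
and helper are simplifications, not the HDFS internals):

import java.util.Map;
import org.apache.hadoop.fs.permission.FsAction;

class AclMatchSketch {
  // Owner, then named user, then group class (capped by mask), then other.
  static FsAction effective(String user, boolean isOwner, boolean inGroup,
      Map<String, FsAction> namedUsers, FsAction owner, FsAction group,
      FsAction mask, FsAction other) {
    if (isOwner) {
      return owner;
    }
    FsAction named = namedUsers.get(user);
    if (named != null) {
      return named.and(mask);  // the named entry wins over group and other
    }
    if (inGroup) {
      return group.and(mask);
    }
    return other;
  }
}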

View File

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<EDITS>
<EDITS_VERSION>-52</EDITS_VERSION>
<EDITS_VERSION>-53</EDITS_VERSION>
<RECORD>
<OPCODE>OP_START_LOG_SEGMENT</OPCODE>
<DATA>
@ -13,8 +13,8 @@
<TXID>2</TXID>
<DELEGATION_KEY>
<KEY_ID>1</KEY_ID>
<EXPIRY_DATE>1390942564729</EXPIRY_DATE>
<KEY>f270a35a4ebb9984</KEY>
<EXPIRY_DATE>1390519460949</EXPIRY_DATE>
<KEY>dc8d30edc97df67d</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@ -24,8 +24,8 @@
<TXID>3</TXID>
<DELEGATION_KEY>
<KEY_ID>2</KEY_ID>
<EXPIRY_DATE>1390942564735</EXPIRY_DATE>
<KEY>22391ec22bc0fc20</KEY>
<EXPIRY_DATE>1390519460952</EXPIRY_DATE>
<KEY>096bc20b6debed03</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@ -37,18 +37,18 @@
<INODEID>16386</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251365583</MTIME>
<ATIME>1390251365583</ATIME>
<MTIME>1389828264873</MTIME>
<ATIME>1389828264873</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>7</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>9</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -59,13 +59,13 @@
<INODEID>0</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251365626</MTIME>
<ATIME>1390251365583</ATIME>
<MTIME>1389828265699</MTIME>
<ATIME>1389828264873</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -78,9 +78,9 @@
<LENGTH>0</LENGTH>
<SRC>/file_create</SRC>
<DST>/file_moved</DST>
<TIMESTAMP>1390251365645</TIMESTAMP>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>9</RPC_CALLID>
<TIMESTAMP>1389828265705</TIMESTAMP>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>11</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -89,9 +89,9 @@
<TXID>7</TXID>
<LENGTH>0</LENGTH>
<PATH>/file_moved</PATH>
<TIMESTAMP>1390251365666</TIMESTAMP>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>10</RPC_CALLID>
<TIMESTAMP>1389828265712</TIMESTAMP>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>12</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -101,9 +101,9 @@
<LENGTH>0</LENGTH>
<INODEID>16387</INODEID>
<PATH>/directory_mkdir</PATH>
<TIMESTAMP>1390251365693</TIMESTAMP>
<TIMESTAMP>1389828265722</TIMESTAMP>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>493</MODE>
</PERMISSION_STATUS>
@ -136,8 +136,8 @@
<TXID>12</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>15</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>17</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -147,8 +147,8 @@
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
<SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>16</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>18</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -157,8 +157,8 @@
<TXID>14</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>17</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>19</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -169,18 +169,18 @@
<INODEID>16388</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251365804</MTIME>
<ATIME>1390251365804</ATIME>
<MTIME>1389828265757</MTIME>
<ATIME>1389828265757</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>18</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>20</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -191,13 +191,13 @@
<INODEID>0</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251365815</MTIME>
<ATIME>1390251365804</ATIME>
<MTIME>1389828265759</MTIME>
<ATIME>1389828265757</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -253,10 +253,10 @@
<LENGTH>0</LENGTH>
<SRC>/file_create</SRC>
<DST>/file_moved</DST>
<TIMESTAMP>1390251365931</TIMESTAMP>
<TIMESTAMP>1389828265782</TIMESTAMP>
<OPTIONS>NONE</OPTIONS>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>25</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>27</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -267,18 +267,18 @@
<INODEID>16389</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251365952</MTIME>
<ATIME>1390251365952</ATIME>
<MTIME>1389828265787</MTIME>
<ATIME>1389828265787</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>27</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>29</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -383,8 +383,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251366514</MTIME>
<ATIME>1390251365952</ATIME>
<MTIME>1389828266540</MTIME>
<ATIME>1389828265787</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@ -404,7 +404,7 @@
<GENSTAMP>1003</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -418,18 +418,18 @@
<INODEID>16390</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251366533</MTIME>
<ATIME>1390251366533</ATIME>
<MTIME>1389828266544</MTIME>
<ATIME>1389828266544</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>40</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>41</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -534,8 +534,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251366726</MTIME>
<ATIME>1390251366533</ATIME>
<MTIME>1389828266569</MTIME>
<ATIME>1389828266544</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@ -555,7 +555,7 @@
<GENSTAMP>1006</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -569,18 +569,18 @@
<INODEID>16391</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251366746</MTIME>
<ATIME>1390251366746</ATIME>
<MTIME>1389828266572</MTIME>
<ATIME>1389828266572</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>52</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>53</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -685,8 +685,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251366795</MTIME>
<ATIME>1390251366746</ATIME>
<MTIME>1389828266599</MTIME>
<ATIME>1389828266572</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@ -706,7 +706,7 @@
<GENSTAMP>1009</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -718,13 +718,13 @@
<TXID>56</TXID>
<LENGTH>0</LENGTH>
<TRG>/file_concat_target</TRG>
<TIMESTAMP>1390251366802</TIMESTAMP>
<TIMESTAMP>1389828266603</TIMESTAMP>
<SOURCES>
<SOURCE1>/file_concat_0</SOURCE1>
<SOURCE2>/file_concat_1</SOURCE2>
</SOURCES>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>63</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>64</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -735,15 +735,15 @@
<INODEID>16392</INODEID>
<PATH>/file_symlink</PATH>
<VALUE>/file_concat_target</VALUE>
<MTIME>1390251366811</MTIME>
<ATIME>1390251366811</ATIME>
<MTIME>1389828266633</MTIME>
<ATIME>1389828266633</ATIME>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>511</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>64</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>66</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -754,18 +754,18 @@
<INODEID>16393</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251366819</MTIME>
<ATIME>1390251366819</ATIME>
<MTIME>1389828266637</MTIME>
<ATIME>1389828266637</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>65</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>67</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -821,23 +821,7 @@
<OPCODE>OP_REASSIGN_LEASE</OPCODE>
<DATA>
<TXID>64</TXID>
<LEASEHOLDER>DFSClient_NONMAPREDUCE_382541401_1</LEASEHOLDER>
<PATH>/hard-lease-recovery-test</PATH>
<NEWHOLDER>HDFS_NameNode</NEWHOLDER>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
<DATA>
<TXID>65</TXID>
<GENSTAMPV2>1012</GENSTAMPV2>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REASSIGN_LEASE</OPCODE>
<DATA>
<TXID>66</TXID>
<LEASEHOLDER>HDFS_NameNode</LEASEHOLDER>
<LEASEHOLDER>DFSClient_NONMAPREDUCE_16108824_1</LEASEHOLDER>
<PATH>/hard-lease-recovery-test</PATH>
<NEWHOLDER>HDFS_NameNode</NEWHOLDER>
</DATA>
@ -845,23 +829,23 @@
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
<TXID>67</TXID>
<TXID>65</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1390251371402</MTIME>
<ATIME>1390251366819</ATIME>
<MTIME>1389828269751</MTIME>
<ATIME>1389828266637</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<BLOCK>
<BLOCK_ID>1073741834</BLOCK_ID>
<NUM_BYTES>11</NUM_BYTES>
<GENSTAMP>1012</GENSTAMP>
<GENSTAMP>1011</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>andrew</USERNAME>
<USERNAME>jing</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -870,72 +854,79 @@
<RECORD>
<OPCODE>OP_ADD_CACHE_POOL</OPCODE>
<DATA>
<TXID>68</TXID>
<TXID>66</TXID>
<POOLNAME>pool1</POOLNAME>
<OWNERNAME>andrew</OWNERNAME>
<GROUPNAME>andrew</GROUPNAME>
<OWNERNAME>jing</OWNERNAME>
<GROUPNAME>staff</GROUPNAME>
<MODE>493</MODE>
<LIMIT>9223372036854775807</LIMIT>
<MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>73</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>74</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_MODIFY_CACHE_POOL</OPCODE>
<DATA>
<TXID>69</TXID>
<TXID>67</TXID>
<POOLNAME>pool1</POOLNAME>
<LIMIT>99</LIMIT>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>74</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>75</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ADD_CACHE_DIRECTIVE</OPCODE>
<DATA>
<TXID>70</TXID>
<TXID>68</TXID>
<ID>1</ID>
<PATH>/path</PATH>
<REPLICATION>1</REPLICATION>
<POOL>pool1</POOL>
<EXPIRATION>2305844399465065912</EXPIRATION>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>75</RPC_CALLID>
<EXPIRATION>2305844399041964876</EXPIRATION>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>76</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_MODIFY_CACHE_DIRECTIVE</OPCODE>
<DATA>
<TXID>71</TXID>
<TXID>69</TXID>
<ID>1</ID>
<REPLICATION>2</REPLICATION>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>76</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>77</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REMOVE_CACHE_DIRECTIVE</OPCODE>
<DATA>
<TXID>72</TXID>
<TXID>70</TXID>
<ID>1</ID>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>77</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>78</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REMOVE_CACHE_POOL</OPCODE>
<DATA>
<TXID>73</TXID>
<TXID>71</TXID>
<POOLNAME>pool1</POOLNAME>
<RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
<RPC_CALLID>78</RPC_CALLID>
<RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
<RPC_CALLID>79</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_END_LOG_SEGMENT</OPCODE>
<DATA>
<TXID>74</TXID>
<TXID>72</TXID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_ACL</OPCODE>
<DATA>
<TXID>73</TXID>
<SRC>/file_set_acl</SRC>
</DATA>
</RECORD>
</EDITS>
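
The OP_SET_ACL record closing the log above is the edit the NameNode journals when a client replaces a path's ACL through the new API. A minimal sketch of the kind of call that produces such a record, assuming a cluster reachable through the default Configuration (the SetAclSketch class and its entry() helper are illustrative, not part of this commit):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class SetAclSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: fs.defaultFS points at a running HDFS NameNode.
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/file_set_acl");
    fs.create(path).close();
    // setAcl replaces the whole ACL, so the spec must include the base
    // user, group, and other entries alongside any named entries.
    List<AclEntry> spec = Arrays.asList(
        entry(AclEntryType.USER, null, FsAction.ALL),
        entry(AclEntryType.USER, "charlie", FsAction.READ_EXECUTE),
        entry(AclEntryType.GROUP, null, FsAction.READ_EXECUTE),
        entry(AclEntryType.OTHER, null, FsAction.READ_EXECUTE));
    fs.setAcl(path, spec);  // journaled by the NameNode as OP_SET_ACL
  }

  private static AclEntry entry(AclEntryType type, String name,
      FsAction perm) {
    AclEntry.Builder b = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(type)
        .setPermission(perm);
    if (name != null) {
      b.setName(name);  // a null name means the base (unnamed) entry
    }
    return b.build();
  }
}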

View File

@ -0,0 +1,976 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!-- Normal mode is test. To run just the commands and dump the output
to the log, set it to nocompare -->
<mode>test</mode>
<!-- Comparator types:
ExactComparator
SubstringComparator
RegexpComparator
RegexpAcrossOutputComparator
TokenComparator
-->
<tests>
<!-- Tests for setfacl and getfacl -->
<test>
<description>getfacl: basic permissions</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getfacl: basic permissions for directory</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Add an ACL</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r-- /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
</comparators>
</test>
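<!-- Note on the expected mask::r-- above: when the command supplies no
     mask entry, HDFS computes one as the union of the group entry and
     all named entries (here r-- from group:: and r-- from user:bob),
     matching POSIX ACL semantics. -->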
<test>
<description>setfacl : Add multiple ACLs at once</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:users:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove an ACL</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,user:charlie:r-x /file1</command>
<command>-fs NAMENODE -setfacl -x user:bob /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!bob).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Add default ACL</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:users:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
</comparators>
</test>
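<!-- Note on the default entries above: the command names only
     default:user:charlie and default:group:admin. HDFS fills in the
     remaining default:user::, default:group::, and default:other::
     entries by copying the access entries, and computes default:mask::
     as the union of the default group and named default entries
     (r-x | r-x | rwx = rwx). -->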
<test>
<description>setfacl : try adding default ACL to file</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>setfacl: Invalid ACL: only directories may have a default ACL</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove one default ACL</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl -x default:user:charlie /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:users:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!default:user:charlie).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove all default ACL</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl -k /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:users:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!default).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove all but base ACLs for a directory</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl -b /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!charlie).*</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!default).*</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!admin).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove all but base ACLs for a file</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /file1</command>
<command>-fs NAMENODE -setfacl -b /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!charlie).*</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!admin).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : check inherit default ACL to file</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -touchz /dir1/file</command>
<command>-fs NAMENODE -getfacl /dir1/file</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1/file</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!default).*</expected-output>
</comparator>
</comparators>
</test>
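<!-- Note on inheritance: the new file copies its access entries from
     the parent's default ACL, but touchz creates with mode 644, so the
     group-class bits clamp the inherited mask to r--; the named entries
     keep their stored permissions and are restricted only through that
     mask, and files never receive default entries of their own. -->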
<test>
<description>setfacl : check inherit default ACL to dir</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -mkdir /dir1/dir2</command>
<command>-fs NAMENODE -getfacl /dir1/dir2</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1/dir2</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getfacl -R : recursive</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1</command>
<command>-fs NAMENODE -mkdir /dir1/dir2</command>
<command>-fs NAMENODE -setfacl -m user:user1:r-x,group:users:rwx /dir1/dir2</command>
<command>-fs NAMENODE -getfacl -R /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:user1:r-x#LF#group::r-x#LF#group:users:rwx#LF#mask::rwx#LF#other::r-x#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
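<!-- Note on ExactComparator: it matches the entire command output, and
     the #LF# token stands for a line feed, so the single expected-output
     line above encodes the full multi-record getfacl -R listing. -->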
<test>
<description>setfacl -R : recursive</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -mkdir /dir1/dir2</command>
<command>-fs NAMENODE -setfacl -R -m user:charlie:r-x,group:admin:rwx /dir1</command>
<command>-fs NAMENODE -getfacl -R /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl --set : Set full set of ACLs</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl --set user::rw-,group::r--,other::r--,user:user1:r-x,group:users:rw- /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:user1:r-x#LF#group::r--#LF#group:users:rw-#LF#mask::rwx#LF#other::r--#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl -x mask : remove mask entry along with other ACL entries</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl -x mask::,user:charlie,group:admin /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#group::r-x#LF#other::r-x#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getfacl: only default ACL</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:rwx /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user:charlie:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getfacl: effective permissions</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:rwx,group::-wx,group:sales:rwx,mask::r-x,default:user:charlie:rwx,default:group::r-x,default:group:sales:rwx,default:mask::rw- /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^user:charlie:rwx\t#effective:r-x$</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^group::-wx\t#effective:--x$</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^group:sales:rwx\t#effective:r-x$</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^default:user:charlie:rwx\t#effective:rw-$</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^default:group::r-x\t#effective:r--$</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^default:group:sales:rwx\t#effective:rw-$</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
</comparators>
</test>
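<!-- Note on effective permissions: getfacl appends "#effective:" to any
     named or group entry that the mask filters; the effective rights are
     the intersection of the entry's permissions and the mask (e.g.
     user:charlie:rwx with mask::r-x is effectively r-x). -->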
<test>
<description>ls: display extended acl marker</description>
<test-commands>
<command>-fs NAMENODE -mkdir -p /dir1/dir2</command>
<command>-fs NAMENODE -setfacl -m user:charlie:rwx,group::-wx,group:sales:rwx,mask::r-x,default:user:charlie:rwx,default:group::r-x,default:group:sales:rwx,default:mask::rw- /dir1/dir2</command>
<command>-fs NAMENODE -ls /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>TokenComparator</type>
<expected-output>Found 1 items</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^drwxr-xr-x\+( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir2</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl: recursive modify entries with mix of files and directories</description>
<test-commands>
<command>-fs NAMENODE -mkdir -p /dir1</command>
<command>-fs NAMENODE -touchz /dir1/file1</command>
<command>-fs NAMENODE -mkdir -p /dir1/dir2</command>
<command>-fs NAMENODE -touchz /dir1/dir2/file2</command>
<command>-fs NAMENODE -setfacl -R -m user:charlie:rwx,default:user:charlie:r-x /dir1</command>
<command>-fs NAMENODE -getfacl -R /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl: recursive remove entries with mix of files and directories</description>
<test-commands>
<command>-fs NAMENODE -mkdir -p /dir1</command>
<command>-fs NAMENODE -touchz /dir1/file1</command>
<command>-fs NAMENODE -mkdir -p /dir1/dir2</command>
<command>-fs NAMENODE -touchz /dir1/dir2/file2</command>
<command>-fs NAMENODE -setfacl -R -m user:bob:rwx,user:charlie:rwx,default:user:bob:rwx,default:user:charlie:r-x /dir1</command>
<command>-fs NAMENODE -setfacl -R -x user:bob,default:user:bob /dir1</command>
<command>-fs NAMENODE -getfacl -R /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl: recursive set with mix of files and directories</description>
<test-commands>
<command>-fs NAMENODE -mkdir -p /dir1</command>
<command>-fs NAMENODE -touchz /dir1/file1</command>
<command>-fs NAMENODE -mkdir -p /dir1/dir2</command>
<command>-fs NAMENODE -touchz /dir1/dir2/file2</command>
<command>-fs NAMENODE -setfacl -R --set user::rwx,user:charlie:rwx,group::r-x,other::r-x,default:user:charlie:r-x /dir1</command>
<command>-fs NAMENODE -getfacl -R /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
</tests>
</configuration>
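
Each shell test above exercises the same NameNode RPCs as the programmatic ACL API. As a hedged sketch of that equivalence (the ModifyAclSketch class and its cluster wiring are assumptions for illustration, not code from this commit), the first setfacl test's "-setfacl -m user:bob:r-- /file1" corresponds roughly to:

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

public class ModifyAclSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/file1");
    fs.create(file).close();  // like "-touchz /file1"
    // modifyAclEntries backs "setfacl -m": it merges the given entries
    // into the existing ACL and recomputes the mask, producing the
    // mask::r-- line the first test checks for.
    fs.modifyAclEntries(file, Arrays.asList(
        new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("bob")
            .setPermission(FsAction.READ)
            .build()));
    // getAclStatus backs "getfacl": it returns the owner, group, and
    // entry list from which the shell renders the output checked above.
    AclStatus status = fs.getAclStatus(file);
    System.out.println(status);
  }
}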