Merge r1555021 through r1569889 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1569890 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2014-02-19 19:57:36 +00:00
commit 72c214c89b
121 changed files with 11680 additions and 517 deletions


@ -8,6 +8,9 @@ Trunk (Unreleased)
FSDataOutputStream.sync() and Syncable.sync(). (szetszwo)
NEW FEATURES
HADOOP-10184. Hadoop Common changes required to support HDFS ACLs. (See
breakdown of tasks below for features and contributors)
IMPROVEMENTS
@ -295,6 +298,34 @@ Trunk (Unreleased)
HADOOP-10044 Improve the javadoc of rpc code (sanjay Radia)
BREAKDOWN OF HADOOP-10184 SUBTASKS AND RELATED JIRAS
HADOOP-10185. FileSystem API for ACLs. (cnauroth)
HADOOP-10186. Remove AclReadFlag and AclWriteFlag in FileSystem API.
(Haohui Mai via cnauroth)
HADOOP-10187. FsShell CLI: add getfacl and setfacl with minimal support for
getting and setting ACLs. (Vinay via cnauroth)
HADOOP-10192. FileSystem#getAclStatus has incorrect JavaDocs. (cnauroth)
HADOOP-10220. Add ACL indicator bit to FsPermission. (cnauroth)
HADOOP-10241. Clean up output of FsShell getfacl. (Chris Nauroth via wheat9)
HADOOP-10213. Fix bugs parsing ACL spec in FsShell setfacl.
(Vinay via cnauroth)
HADOOP-10277. setfacl -x fails to parse ACL spec if trying to remove the
mask entry. (Vinay via cnauroth)
HADOOP-10270. getfacl does not display effective permissions of masked
entries. (cnauroth)
HADOOP-10344. Fix TestAclCommands after merging HADOOP-10338 patch.
(cnauroth)
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)


@ -25,8 +25,6 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
@ -40,7 +38,6 @@ import java.util.ServiceLoader;
import java.util.Set;
import java.util.Stack;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
@ -51,6 +48,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.io.Text;
@ -2269,6 +2268,88 @@ public abstract class FileSystem extends Configured implements Closeable {
+ " doesn't support deleteSnapshot");
}
/**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
* @throws IOException if an ACL could not be modified
*/
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support modifyAclEntries");
}
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing entries to remove
* @throws IOException if an ACL could not be modified
*/
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAclEntries");
}
/**
* Removes all default ACL entries from files and directories.
*
* @param path Path to modify
* @throws IOException if an ACL could not be modified
*/
public void removeDefaultAcl(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeDefaultAcl");
}
/**
* Removes all but the base ACL entries of files and directories. The entries
* for user, group, and others are retained for compatibility with permission
* bits.
*
* @param path Path to modify
* @throws IOException if an ACL could not be removed
*/
public void removeAcl(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAcl");
}
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications, must include entries
* for user, group, and others for compatibility with permission bits.
* @throws IOException if an ACL could not be modified
*/
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setAcl");
}
/**
* Gets the ACL of a file or directory.
*
* @param path Path to get
* @return AclStatus describing the ACL of the file or directory
* @throws IOException if an ACL could not be read
*/
public AclStatus getAclStatus(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getAclStatus");
}
// making it volatile to be able to do a double checked locking
private volatile static boolean FILE_SYSTEMS_LOADED = false;
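For illustration only, a minimal client-side sketch of how the new ACL API could be called once a concrete FileSystem (for example HDFS) overrides these methods; the URI, path, and user name below are hypothetical, and the base class shown here would simply throw UnsupportedOperationException:

    import java.net.URI;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclEntryScope;
    import org.apache.hadoop.fs.permission.AclEntryType;
    import org.apache.hadoop.fs.permission.AclStatus;
    import org.apache.hadoop.fs.permission.FsAction;

    public class AclApiSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf); // hypothetical cluster
        Path path = new Path("/tmp/data");                                         // hypothetical path

        // Grant read/write to user "bob"; the entry is merged into the existing ACL.
        List<AclEntry> spec = Arrays.asList(new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("bob")
            .setPermission(FsAction.READ_WRITE)
            .build());
        fs.modifyAclEntries(path, spec);

        // Read the resulting ACL back.
        AclStatus status = fs.getAclStatus(path);
        System.out.println(status);
      }
    }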


@ -22,9 +22,13 @@ import java.io.*;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Options.ChecksumOpt;
@ -507,4 +511,36 @@ public class FilterFileSystem extends FileSystem {
throws IOException {
fs.deleteSnapshot(path, snapshotName);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fs.modifyAclEntries(path, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fs.removeAclEntries(path, aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
fs.removeDefaultAcl(path);
}
@Override
public void removeAcl(Path path) throws IOException {
fs.removeAcl(path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
fs.setAcl(path, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return fs.getAclStatus(path);
}
}


@ -569,9 +569,6 @@ public class RawLocalFileSystem extends FileSystem {
//expected format
//-rw------- 1 username groupname ...
String permission = t.nextToken();
if (permission.length() > 10) { //files with ACLs might have a '+'
permission = permission.substring(0, 10);
}
setPermission(FsPermission.valueOf(permission));
t.nextToken();


@ -0,0 +1,301 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import com.google.common.base.Objects;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
/**
* Defines a single entry in an ACL. An ACL entry has a type (user, group,
* mask, or other), an optional name (referring to a specific user or group), a
* set of permissions (any combination of read, write and execute), and a scope
* (access or default). AclEntry instances are immutable. Use a {@link Builder}
* to create a new instance.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AclEntry {
private final AclEntryType type;
private final String name;
private final FsAction permission;
private final AclEntryScope scope;
/**
* Returns the ACL entry type.
*
* @return AclEntryType ACL entry type
*/
public AclEntryType getType() {
return type;
}
/**
* Returns the optional ACL entry name.
*
* @return String ACL entry name, or null if undefined
*/
public String getName() {
return name;
}
/**
* Returns the set of permissions in the ACL entry.
*
* @return FsAction set of permissions in the ACL entry
*/
public FsAction getPermission() {
return permission;
}
/**
* Returns the scope of the ACL entry.
*
* @return AclEntryScope scope of the ACL entry
*/
public AclEntryScope getScope() {
return scope;
}
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (getClass() != o.getClass()) {
return false;
}
AclEntry other = (AclEntry)o;
return Objects.equal(type, other.type) &&
Objects.equal(name, other.name) &&
Objects.equal(permission, other.permission) &&
Objects.equal(scope, other.scope);
}
@Override
public int hashCode() {
return Objects.hashCode(type, name, permission, scope);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
if (scope == AclEntryScope.DEFAULT) {
sb.append("default:");
}
if (type != null) {
sb.append(type.toString().toLowerCase());
}
sb.append(':');
if (name != null) {
sb.append(name);
}
sb.append(':');
if (permission != null) {
sb.append(permission.SYMBOL);
}
return sb.toString();
}
/**
* Builder for creating new AclEntry instances.
*/
public static class Builder {
private AclEntryType type;
private String name;
private FsAction permission;
private AclEntryScope scope = AclEntryScope.ACCESS;
/**
* Sets the ACL entry type.
*
* @param type AclEntryType ACL entry type
* @return Builder this builder, for call chaining
*/
public Builder setType(AclEntryType type) {
this.type = type;
return this;
}
/**
* Sets the optional ACL entry name.
*
* @param name String optional ACL entry name
* @return Builder this builder, for call chaining
*/
public Builder setName(String name) {
this.name = name;
return this;
}
/**
* Sets the set of permissions in the ACL entry.
*
* @param permission FsAction set of permissions in the ACL entry
* @return Builder this builder, for call chaining
*/
public Builder setPermission(FsAction permission) {
this.permission = permission;
return this;
}
/**
* Sets the scope of the ACL entry. If this method is not called, then the
* builder assumes {@link AclEntryScope#ACCESS}.
*
* @param scope AclEntryScope scope of the ACL entry
* @return Builder this builder, for call chaining
*/
public Builder setScope(AclEntryScope scope) {
this.scope = scope;
return this;
}
/**
* Builds a new AclEntry populated with the set properties.
*
* @return AclEntry new AclEntry
*/
public AclEntry build() {
return new AclEntry(type, name, permission, scope);
}
}
/**
* Private constructor.
*
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @param permission FsAction set of permissions in the ACL entry
* @param scope AclEntryScope scope of the ACL entry
*/
private AclEntry(AclEntryType type, String name, FsAction permission, AclEntryScope scope) {
this.type = type;
this.name = name;
this.permission = permission;
this.scope = scope;
}
/**
* Parses a string representation of an ACL spec into a list of AclEntry
* objects. Example: "user::rwx,user:foo:rw-,group::r--,other::---"
*
* @param aclSpec
* String representation of an ACL spec.
* @param includePermission
* for setAcl operations this will be true. i.e. AclSpec should
* include permissions.<br>
* But for removeAcl operation it will be false. i.e. AclSpec should
* not contain permissions.<br>
* Example: "user:foo,group:bar"
* @return Returns list of {@link AclEntry} parsed
*/
public static List<AclEntry> parseAclSpec(String aclSpec,
boolean includePermission) {
List<AclEntry> aclEntries = new ArrayList<AclEntry>();
Collection<String> aclStrings = StringUtils.getStringCollection(aclSpec,
",");
for (String aclStr : aclStrings) {
AclEntry aclEntry = parseAclEntry(aclStr, includePermission);
aclEntries.add(aclEntry);
}
return aclEntries;
}
/**
* Parses a string representation of an ACL into an AclEntry object.<br>
*
* @param aclStr
* String representation of an ACL.<br>
* Example: "user:foo:rw-"
* @param includePermission
* for setAcl operations this will be true. i.e. Acl should include
* permissions.<br>
* But for removeAcl operation it will be false. i.e. Acl should not
* contain permissions.<br>
* Example: "user:foo,group:bar,mask::"
* @return Returns an {@link AclEntry} object
*/
public static AclEntry parseAclEntry(String aclStr,
boolean includePermission) {
AclEntry.Builder builder = new AclEntry.Builder();
// Here "::" represents one empty string.
// StringUtils.getStringCollection() will ignore this.
String[] split = aclStr.split(":");
if (split.length == 0) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr);
}
int index = 0;
if ("default".equals(split[0])) {
// default entry
index++;
builder.setScope(AclEntryScope.DEFAULT);
}
if (split.length <= index) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr);
}
AclEntryType aclType = null;
try {
aclType = Enum.valueOf(AclEntryType.class, split[index].toUpperCase());
builder.setType(aclType);
index++;
} catch (IllegalArgumentException iae) {
throw new HadoopIllegalArgumentException(
"Invalid type of acl in <aclSpec> :" + aclStr);
}
if (split.length > index) {
String name = split[index];
if (!name.isEmpty()) {
builder.setName(name);
}
index++;
}
if (includePermission) {
if (split.length < index) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : "
+ aclStr);
}
String permission = split[index];
FsAction fsAction = FsAction.getFsAction(permission);
if (null == fsAction) {
throw new HadoopIllegalArgumentException(
"Invalid permission in <aclSpec> : " + aclStr);
}
builder.setPermission(fsAction);
index++;
}
if (split.length > index) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr);
}
AclEntry aclEntry = builder.build();
return aclEntry;
}
}
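A short sketch of how the builder and parser above could be used (group and user names hypothetical; assumes the usual imports from this package):

    // Build one default-scoped named group entry.
    AclEntry entry = new AclEntry.Builder()
        .setScope(AclEntryScope.DEFAULT)
        .setType(AclEntryType.GROUP)
        .setName("sales")
        .setPermission(FsAction.READ_EXECUTE)
        .build();
    System.out.println(entry);   // "default:group:sales:r-x"

    // Spec with permissions, as used by setfacl -m / --set.
    List<AclEntry> withPerms =
        AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true);

    // Spec without permissions, as used by setfacl -x.
    List<AclEntry> withoutPerms = AclEntry.parseAclSpec("user:foo,mask::", false);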


@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Specifies the scope or intended usage of an ACL entry.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum AclEntryScope {
/**
* An ACL entry that is inspected during permission checks to enforce
* permissions.
*/
ACCESS,
/**
* An ACL entry to be applied to a directory's children that do not otherwise
* have their own ACL defined. Unlike an access ACL entry, a default ACL
* entry is not inspected as part of permission enforcement on the directory
* that owns it.
*/
DEFAULT;
}


@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Specifies the type of an ACL entry.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum AclEntryType {
/**
* An ACL entry applied to a specific user. These ACL entries can be unnamed,
* which applies to the file owner, or named, which applies to the specific
* named user.
*/
USER,
/**
* An ACL entry applied to a specific group. These ACL entries can be
* unnamed, which applies to the file's group, or named, which applies to the
* specific named group.
*/
GROUP,
/**
* An ACL mask entry. Mask entries are unnamed. During permission checks,
* the mask entry interacts with all ACL entries that are members of the group
* class. This consists of all named user entries, the unnamed group entry,
* and all named group entries. For each such entry, any permissions that are
* absent from the mask entry are removed from the effective permissions used
* during the permission check.
*/
MASK,
/**
* An ACL entry that applies to all other users that were not covered by one
* of the more specific ACL entry types.
*/
OTHER;
}
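The mask behavior described above can be expressed directly with FsAction: the effective permission of a group-class entry is its own permission ANDed with the mask. A small sketch (values hypothetical):

    FsAction namedUserPerm = FsAction.ALL;             // e.g. user:alice:rwx
    FsAction maskPerm = FsAction.READ_EXECUTE;         // mask::r-x
    FsAction effective = namedUserPerm.and(maskPerm);  // what permission checks actually use
    System.out.println(effective.SYMBOL);              // prints "r-x"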


@ -0,0 +1,201 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
/**
* An AclStatus contains the ACL information of a specific file. AclStatus
* instances are immutable. Use a {@link Builder} to create a new instance.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AclStatus {
private final String owner;
private final String group;
private final boolean stickyBit;
private final List<AclEntry> entries;
/**
* Returns the file owner.
*
* @return String file owner
*/
public String getOwner() {
return owner;
}
/**
* Returns the file group.
*
* @return String file group
*/
public String getGroup() {
return group;
}
/**
* Returns the sticky bit.
*
* @return boolean sticky bit
*/
public boolean isStickyBit() {
return stickyBit;
}
/**
* Returns the list of all ACL entries, ordered by their natural ordering.
*
* @return List<AclEntry> unmodifiable ordered list of all ACL entries
*/
public List<AclEntry> getEntries() {
return entries;
}
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (getClass() != o.getClass()) {
return false;
}
AclStatus other = (AclStatus)o;
return Objects.equal(owner, other.owner)
&& Objects.equal(group, other.group)
&& stickyBit == other.stickyBit
&& Objects.equal(entries, other.entries);
}
@Override
public int hashCode() {
return Objects.hashCode(owner, group, stickyBit, entries);
}
@Override
public String toString() {
return new StringBuilder()
.append("owner: ").append(owner)
.append(", group: ").append(group)
.append(", acl: {")
.append("entries: ").append(entries)
.append(", stickyBit: ").append(stickyBit)
.append('}')
.toString();
}
/**
* Builder for creating new Acl instances.
*/
public static class Builder {
private String owner;
private String group;
private boolean stickyBit;
private List<AclEntry> entries = Lists.newArrayList();
/**
* Sets the file owner.
*
* @param owner String file owner
* @return Builder this builder, for call chaining
*/
public Builder owner(String owner) {
this.owner = owner;
return this;
}
/**
* Sets the file group.
*
* @param group String file group
* @return Builder this builder, for call chaining
*/
public Builder group(String group) {
this.group = group;
return this;
}
/**
* Adds an ACL entry.
*
* @param e AclEntry entry to add
* @return Builder this builder, for call chaining
*/
public Builder addEntry(AclEntry e) {
this.entries.add(e);
return this;
}
/**
* Adds a list of ACL entries.
*
* @param entries AclEntry entries to add
* @return Builder this builder, for call chaining
*/
public Builder addEntries(Iterable<AclEntry> entries) {
for (AclEntry e : entries)
this.entries.add(e);
return this;
}
/**
* Sets sticky bit. If this method is not called, then the builder assumes
* false.
*
* @param stickyBit
* boolean sticky bit
* @return Builder this builder, for call chaining
*/
public Builder stickyBit(boolean stickyBit) {
this.stickyBit = stickyBit;
return this;
}
/**
* Builds a new AclStatus populated with the set properties.
*
* @return AclStatus new AclStatus
*/
public AclStatus build() {
return new AclStatus(owner, group, stickyBit, entries);
}
}
/**
* Private constructor.
*
* @param owner String file owner
* @param group String file group
* @param stickyBit the sticky bit
* @param entries the ACL entries
*/
private AclStatus(String owner, String group, boolean stickyBit,
Iterable<AclEntry> entries) {
this.owner = owner;
this.group = group;
this.stickyBit = stickyBit;
this.entries = Lists.newArrayList(entries);
}
}
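A brief sketch of assembling an AclStatus with the builder above (owner, group, and the single entry are hypothetical):

    AclStatus status = new AclStatus.Builder()
        .owner("alice")
        .group("staff")
        .stickyBit(false)
        .addEntry(new AclEntry.Builder()
            .setType(AclEntryType.USER)
            .setName("bob")
            .setPermission(FsAction.READ)
            .build())
        .build();
    System.out.println(status);
    // owner: alice, group: staff, acl: {entries: [user:bob:r--], stickyBit: false}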


@ -23,8 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
* File system actions, e.g. read, write, etc.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum FsAction {
// POSIX style
NONE("---"),
@ -69,4 +69,21 @@ Trunk (Unreleased)
public FsAction not() {
return vals[7 - ordinal()];
}
/**
* Get the FsAction enum for String representation of permissions
*
* @param permission
* 3-character string representation of permission. ex: rwx
* @return Returns FsAction enum if the corresponding FsAction exists for permission.
* Otherwise returns null
*/
public static FsAction getFsAction(String permission) {
for (FsAction fsAction : vals) {
if (fsAction.SYMBOL.equals(permission)) {
return fsAction;
}
}
return null;
}
}
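The new lookup returns null instead of throwing when the symbol is unknown, so callers are expected to validate the result; a minimal sketch:

    FsAction rw = FsAction.getFsAction("rw-");   // READ_WRITE
    FsAction bad = FsAction.getFsAction("rwz");  // null: not a valid 3-character symbol
    if (bad == null) {
      // e.g. reject the spec, as parseAclEntry does with HadoopIllegalArgumentException
    }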


@ -0,0 +1,283 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
/**
* Acl related operations
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
class AclCommands extends FsCommand {
private static String GET_FACL = "getfacl";
private static String SET_FACL = "setfacl";
public static void registerCommands(CommandFactory factory) {
factory.addClass(GetfaclCommand.class, "-" + GET_FACL);
factory.addClass(SetfaclCommand.class, "-" + SET_FACL);
}
/**
* Implementing the '-getfacl' command for the FsShell.
*/
public static class GetfaclCommand extends FsCommand {
public static String NAME = GET_FACL;
public static String USAGE = "[-R] <path>";
public static String DESCRIPTION = "Displays the Access Control Lists"
+ " (ACLs) of files and directories. If a directory has a default ACL,"
+ " then getfacl also displays the default ACL.\n"
+ "-R: List the ACLs of all files and directories recursively.\n"
+ "<path>: File or directory to list.\n";
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R");
cf.parse(args);
setRecursive(cf.getOpt("R"));
if (args.isEmpty()) {
throw new HadoopIllegalArgumentException("<path> is missing");
}
if (args.size() > 1) {
throw new HadoopIllegalArgumentException("Too many arguments");
}
}
@Override
protected void processPath(PathData item) throws IOException {
AclStatus aclStatus = item.fs.getAclStatus(item.path);
out.println("# file: " + item);
out.println("# owner: " + aclStatus.getOwner());
out.println("# group: " + aclStatus.getGroup());
List<AclEntry> entries = aclStatus.getEntries();
if (aclStatus.isStickyBit()) {
String stickyFlag = "T";
for (AclEntry aclEntry : entries) {
if (aclEntry.getType() == AclEntryType.OTHER
&& aclEntry.getScope() == AclEntryScope.ACCESS
&& aclEntry.getPermission().implies(FsAction.EXECUTE)) {
stickyFlag = "t";
break;
}
}
out.println("# flags: --" + stickyFlag);
}
FsPermission perm = item.stat.getPermission();
if (entries.isEmpty()) {
printMinimalAcl(perm);
} else {
printExtendedAcl(perm, entries);
}
out.println();
}
/**
* Prints an extended ACL, including all extended ACL entries and also the
* base entries implied by the permission bits.
*
* @param perm FsPermission of file
* @param entries List<AclEntry> containing ACL entries of file
*/
private void printExtendedAcl(FsPermission perm, List<AclEntry> entries) {
// Print owner entry implied by owner permission bits.
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build());
// Print all extended access ACL entries.
boolean hasAccessAcl = false;
Iterator<AclEntry> entryIter = entries.iterator();
AclEntry curEntry = null;
while (entryIter.hasNext()) {
curEntry = entryIter.next();
if (curEntry.getScope() == AclEntryScope.DEFAULT) {
break;
}
hasAccessAcl = true;
printExtendedAclEntry(curEntry, perm.getGroupAction());
}
// Print mask entry implied by group permission bits, or print group entry
// if there is no access ACL (only default ACL).
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build());
// Print other entry implied by other bits.
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
// Print default ACL entries.
if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
out.println(curEntry);
// ACL sort order guarantees default mask is the second-to-last entry.
FsAction maskPerm = entries.get(entries.size() - 2).getPermission();
while (entryIter.hasNext()) {
printExtendedAclEntry(entryIter.next(), maskPerm);
}
}
}
/**
* Prints a single extended ACL entry. If the mask restricts the
* permissions of the entry, then also prints the restricted version as the
* effective permissions. The mask applies to all named entries and also
* the unnamed group entry.
*
* @param entry AclEntry extended ACL entry to print
* @param maskPerm FsAction permissions in the ACL's mask entry
*/
private void printExtendedAclEntry(AclEntry entry, FsAction maskPerm) {
if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) {
FsAction entryPerm = entry.getPermission();
FsAction effectivePerm = entryPerm.and(maskPerm);
if (entryPerm != effectivePerm) {
out.println(String.format("%-31s #effective:%s", entry,
effectivePerm.SYMBOL));
} else {
out.println(entry);
}
} else {
out.println(entry);
}
}
/**
* Prints a minimal ACL, consisting of exactly 3 ACL entries implied by the
* permission bits.
*
* @param perm FsPermission of file
*/
private void printMinimalAcl(FsPermission perm) {
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build());
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build());
out.println(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
}
}
/**
* Implementing the '-setfacl' command for the FsShell.
*/
public static class SetfaclCommand extends FsCommand {
public static String NAME = SET_FACL;
public static String USAGE = "[-R] [{-b|-k} {-m|-x <acl_spec>} <path>]"
+ "|[--set <acl_spec> <path>]";
public static String DESCRIPTION = "Sets Access Control Lists (ACLs)"
+ " of files and directories.\n"
+ "Options:\n"
+ "-b :Remove all but the base ACL entries. The entries for user,"
+ " group and others are retained for compatibility with permission "
+ "bits.\n"
+ "-k :Remove the default ACL.\n"
+ "-R :Apply operations to all files and directories recursively.\n"
+ "-m :Modify ACL. New entries are added to the ACL, and existing"
+ " entries are retained.\n"
+ "-x :Remove specified ACL entries. Other ACL entries are retained.\n"
+ "--set :Fully replace the ACL, discarding all existing entries."
+ " The <acl_spec> must include entries for user, group, and others"
+ " for compatibility with permission bits.\n"
+ "<acl_spec>: Comma separated list of ACL entries.\n"
+ "<path>: File or directory to modify.\n";
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "b", "k", "R",
"m", "x", "-set");
List<AclEntry> aclEntries = null;
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
cf.parse(args);
setRecursive(cf.getOpt("R"));
// Mix of remove and modify acl flags are not allowed
boolean bothRemoveOptions = cf.getOpt("b") && cf.getOpt("k");
boolean bothModifyOptions = cf.getOpt("m") && cf.getOpt("x");
boolean oneRemoveOption = cf.getOpt("b") || cf.getOpt("k");
boolean oneModifyOption = cf.getOpt("m") || cf.getOpt("x");
boolean setOption = cf.getOpt("-set");
if ((bothRemoveOptions || bothModifyOptions)
|| (oneRemoveOption && oneModifyOption)
|| (setOption && (oneRemoveOption || oneModifyOption))) {
throw new HadoopIllegalArgumentException(
"Specified flags contains both remove and modify flags");
}
// Only -m, -x and --set expects <acl_spec>
if (oneModifyOption || setOption) {
if (args.size() < 2) {
throw new HadoopIllegalArgumentException("<acl_spec> is missing");
}
aclEntries = AclEntry.parseAclSpec(args.removeFirst(), !cf.getOpt("x"));
}
if (args.isEmpty()) {
throw new HadoopIllegalArgumentException("<path> is missing");
}
if (args.size() > 1) {
throw new HadoopIllegalArgumentException("Too many arguments");
}
}
@Override
protected void processPath(PathData item) throws IOException {
if (cf.getOpt("b")) {
item.fs.removeAcl(item.path);
} else if (cf.getOpt("k")) {
item.fs.removeDefaultAcl(item.path);
} else if (cf.getOpt("m")) {
item.fs.modifyAclEntries(item.path, aclEntries);
} else if (cf.getOpt("x")) {
item.fs.removeAclEntries(item.path, aclEntries);
} else if (cf.getOpt("-set")) {
item.fs.setAcl(item.path, aclEntries);
}
}
}
}
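Based on the printing logic above, getfacl output for a file with an extended ACL would look roughly as follows (path, names, and permissions hypothetical; the #effective column appears only when the mask restricts an entry):

    # file: /tmp/data
    # owner: alice
    # group: staff
    user::rw-
    user:bob:rwx                    #effective:r--
    group::r--
    mask::r--
    other::---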


@ -43,6 +43,7 @@ abstract public class FsCommand extends Command {
* @param factory where to register the class
*/
public static void registerCommands(CommandFactory factory) {
factory.registerCommands(AclCommands.class);
factory.registerCommands(CopyCommands.class);
factory.registerCommands(Count.class);
factory.registerCommands(Delete.class);


@ -19,15 +19,22 @@
package org.apache.hadoop.fs.shell;
import java.io.IOException;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.LinkedList;
import java.util.Set;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcNoSuchMethodException;
import com.google.common.collect.Sets;
/**
* Get a listing of all files in that match the file patterns.
@ -60,11 +67,14 @@ class Ls extends FsCommand {
protected static final SimpleDateFormat dateFormat =
new SimpleDateFormat("yyyy-MM-dd HH:mm");
protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;
protected int maxPerm = 9, maxRepl = 3, maxLen = 10, maxOwner = 0,
maxGroup = 0;
protected String lineFormat;
protected boolean dirRecurse;
protected boolean humanReadable = false;
private Set<URI> aclNotSupportedFsSet = Sets.newHashSet();
protected String formatSize(long size) {
return humanReadable
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
@ -107,7 +117,7 @@ class Ls extends FsCommand {
FileStatus stat = item.stat;
String line = String.format(lineFormat,
(stat.isDirectory() ? "d" : "-"),
stat.getPermission(),
stat.getPermission() + (hasAcl(item) ? "+" : ""),
(stat.isFile() ? stat.getReplication() : "-"),
stat.getOwner(),
stat.getGroup(),
@ -125,6 +135,7 @@ class Ls extends FsCommand {
private void adjustColumnWidths(PathData items[]) {
for (PathData item : items) {
FileStatus stat = item.stat;
maxPerm = maxLength(maxPerm, stat.getPermission());
maxRepl = maxLength(maxRepl, stat.getReplication());
maxLen = maxLength(maxLen, stat.getLen());
maxOwner = maxLength(maxOwner, stat.getOwner());
@ -132,7 +143,7 @@ class Ls extends FsCommand {
}
StringBuilder fmt = new StringBuilder();
fmt.append("%s%s "); // permission string
fmt.append("%s%-" + maxPerm + "s "); // permission string
fmt.append("%" + maxRepl + "s ");
// Do not use '%-0s' as a formatting conversion, since it will throw a
// a MissingFormatWidthException if it is used in String.format().
@ -144,6 +155,49 @@ class Ls extends FsCommand {
lineFormat = fmt.toString();
}
/**
* Calls getAclStatus to determine if the given item has an ACL. For
* compatibility, this method traps errors caused by the RPC method missing
* from the server side. This would happen if the client was connected to an
* old NameNode that didn't have the ACL APIs. This method also traps the
* case of the client-side FileSystem not implementing the ACL APIs.
* FileSystem instances that do not support ACLs are remembered. This
* prevents the client from sending multiple failing RPC calls during a
* recursive ls.
*
* @param item PathData item to check
* @return boolean true if item has an ACL
* @throws IOException if there is a failure
*/
private boolean hasAcl(PathData item) throws IOException {
FileSystem fs = item.fs;
if (aclNotSupportedFsSet.contains(fs.getUri())) {
// This FileSystem failed to run the ACL API in an earlier iteration.
return false;
}
try {
return !fs.getAclStatus(item.path).getEntries().isEmpty();
} catch (RemoteException e) {
// If this is a RpcNoSuchMethodException, then the client is connected to
// an older NameNode that doesn't support ACLs. Keep going.
IOException e2 = e.unwrapRemoteException(RpcNoSuchMethodException.class);
if (!(e2 instanceof RpcNoSuchMethodException)) {
throw e;
}
} catch (IOException e) {
// The NameNode supports ACLs, but they are not enabled. Keep going.
String message = e.getMessage();
if (message != null && !message.contains("ACLs has been disabled")) {
throw e;
}
} catch (UnsupportedOperationException e) {
// The underlying FileSystem doesn't implement ACLs. Keep going.
}
// Remember that this FileSystem cannot support ACLs.
aclNotSupportedFsSet.add(fs.getUri());
return false;
}
private int maxLength(int n, Object value) {
return Math.max(n, (value != null) ? String.valueOf(value).length() : 0);
}
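With this change a path that carries an ACL shows a '+' after its permission string, and the permission column is padded to the widest value seen; a rough illustration (values hypothetical):

    drwxr-xr-x+  - alice staff          0 2014-02-19 12:00 /tmp/acl-dir
    -rw-r--r--   3 alice staff       1024 2014-02-19 12:00 /tmp/plain-file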


@ -20,6 +20,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -36,6 +37,8 @@ import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
@ -277,7 +280,39 @@ class ChRootedFileSystem extends FilterFileSystem {
throws IOException {
super.setTimes(fullPath(f), mtime, atime);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
super.modifyAclEntries(fullPath(path), aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
super.removeAclEntries(fullPath(path), aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
super.removeDefaultAcl(fullPath(path));
}
@Override
public void removeAcl(Path path) throws IOException {
super.removeAcl(fullPath(path));
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
super.setAcl(fullPath(path), aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return super.getAclStatus(fullPath(path));
}
@Override
public Path resolvePath(final Path p) throws IOException {
return super.resolvePath(fullPath(p));


@ -28,7 +28,6 @@ import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
@ -45,9 +44,10 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@ -473,6 +473,52 @@ public class ViewFileSystem extends FileSystem {
res.targetFileSystem.setTimes(res.remainingPath, mtime, atime);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path),
true);
res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path),
true);
res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec);
}
@Override
public void removeDefaultAcl(Path path)
throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeDefaultAcl(res.remainingPath);
}
@Override
public void removeAcl(Path path)
throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeAcl(res.remainingPath);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.setAcl(res.remainingPath, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
return res.targetFileSystem.getAclStatus(res.remainingPath);
}
@Override
public void setVerifyChecksum(final boolean verifyChecksum) {
List<InodeTree.MountPoint<FileSystem>> mountPoints =


@ -25,6 +25,7 @@ import java.util.HashSet;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
// Keeps track of which datanodes/tasktrackers are allowed to connect to the
@ -48,13 +49,30 @@ public class HostsFileReader {
refresh();
}
@Private
public HostsFileReader(String includesFile, InputStream inFileInputStream,
String excludesFile, InputStream exFileInputStream) throws IOException {
includes = new HashSet<String>();
excludes = new HashSet<String>();
this.includesFile = includesFile;
this.excludesFile = excludesFile;
refresh(inFileInputStream, exFileInputStream);
}
public static void readFileToSet(String type,
String filename, Set<String> set) throws IOException {
File file = new File(filename);
FileInputStream fis = new FileInputStream(file);
readFileToSetWithFileInputStream(type, filename, fis, set);
}
@Private
public static void readFileToSetWithFileInputStream(String type,
String filename, InputStream fileInputStream, Set<String> set)
throws IOException {
BufferedReader reader = null;
try {
reader = new BufferedReader(new InputStreamReader(fis));
reader = new BufferedReader(new InputStreamReader(fileInputStream));
String line;
while ((line = reader.readLine()) != null) {
String[] nodes = line.split("[ \t\n\f\r]+");
@ -71,26 +89,63 @@ public class HostsFileReader {
}
}
}
}
} finally {
if (reader != null) {
reader.close();
}
fis.close();
fileInputStream.close();
}
}
public synchronized void refresh() throws IOException {
LOG.info("Refreshing hosts (include/exclude) list");
Set<String> newIncludes = new HashSet<String>();
Set<String> newExcludes = new HashSet<String>();
boolean switchIncludes = false;
boolean switchExcludes = false;
if (!includesFile.isEmpty()) {
Set<String> newIncludes = new HashSet<String>();
readFileToSet("included", includesFile, newIncludes);
switchIncludes = true;
}
if (!excludesFile.isEmpty()) {
readFileToSet("excluded", excludesFile, newExcludes);
switchExcludes = true;
}
if (switchIncludes) {
// switch the new hosts that are to be included
includes = newIncludes;
}
if (!excludesFile.isEmpty()) {
Set<String> newExcludes = new HashSet<String>();
readFileToSet("excluded", excludesFile, newExcludes);
if (switchExcludes) {
// switch the excluded hosts
excludes = newExcludes;
}
}
@Private
public synchronized void refresh(InputStream inFileInputStream,
InputStream exFileInputStream) throws IOException {
LOG.info("Refreshing hosts (include/exclude) list");
Set<String> newIncludes = new HashSet<String>();
Set<String> newExcludes = new HashSet<String>();
boolean switchIncludes = false;
boolean switchExcludes = false;
if (inFileInputStream != null) {
readFileToSetWithFileInputStream("included", includesFile,
inFileInputStream, newIncludes);
switchIncludes = true;
}
if (exFileInputStream != null) {
readFileToSetWithFileInputStream("excluded", excludesFile,
exFileInputStream, newExcludes);
switchExcludes = true;
}
if (switchIncludes) {
// switch the new hosts that are to be included
includes = newIncludes;
}
if (switchExcludes) {
// switch the excluded hosts
excludes = newExcludes;
}
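A small sketch of the new stream-based entry points, e.g. for a test that supplies host lists from memory rather than from files (host names are hypothetical; getHosts() and getExcludedHosts() are pre-existing accessors not shown in this diff):

    InputStream in = new ByteArrayInputStream("host1\nhost2\n".getBytes(StandardCharsets.UTF_8));
    InputStream ex = new ByteArrayInputStream("badhost\n".getBytes(StandardCharsets.UTF_8));
    // The file names are only recorded for log messages; the content comes from the streams.
    HostsFileReader reader =
        new HostsFileReader("dummy-includes", in, "dummy-excludes", ex);
    System.out.println(reader.getHosts());          // e.g. [host1, host2]
    System.out.println(reader.getExcludedHosts());  // e.g. [badhost]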


@ -231,6 +231,29 @@ get
Returns 0 on success and -1 on error.
getfacl
Usage: <<<hdfs dfs -getfacl [-R] <path> >>>
Displays the Access Control Lists (ACLs) of files and directories. If a
directory has a default ACL, then getfacl also displays the default ACL.
Options:
* -R: List the ACLs of all files and directories recursively.
* <path>: File or directory to list.
Examples:
* <<<hdfs dfs -getfacl /file>>>
* <<<hdfs dfs -getfacl -R /dir>>>
Exit Code:
Returns 0 on success and non-zero on error.
getmerge
Usage: <<<hdfs dfs -getmerge <src> <localdst> [addnl]>>>
@ -379,6 +402,54 @@ rmr
Returns 0 on success and -1 on error.
setfacl
Usage: <<<hdfs dfs -setfacl [-R] [{-b|-k} {-m|-x <acl_spec>} <path>]|[--set <acl_spec> <path>] >>>
Sets Access Control Lists (ACLs) of files and directories.
Options:
* -b: Remove all but the base ACL entries. The entries for user, group and
others are retained for compatibility with permission bits.
* -k: Remove the default ACL.
* -R: Apply operations to all files and directories recursively.
* -m: Modify ACL. New entries are added to the ACL, and existing entries
are retained.
* -x: Remove specified ACL entries. Other ACL entries are retained.
* --set: Fully replace the ACL, discarding all existing entries. The
<acl_spec> must include entries for user, group, and others for
compatibility with permission bits.
* <acl_spec>: Comma separated list of ACL entries.
* <path>: File or directory to modify.
Examples:
* <<<hdfs dfs -setfacl -m user:hadoop:rw- /file>>>
* <<<hdfs dfs -setfacl -x user:hadoop /file>>>
* <<<hdfs dfs -setfacl -b /file>>>
* <<<hdfs dfs -setfacl -k /dir>>>
* <<<hdfs dfs -setfacl --set user::rw-,user:hadoop:rw-,group::r--,other::r-- /file>>>
* <<<hdfs dfs -setfacl -R -m user:hadoop:r-x /dir>>>
* <<<hdfs dfs -setfacl -m default:user:hadoop:r-x /dir>>>
Exit Code:
Returns 0 on success and non-zero on error.
setrep
Usage: <<<hdfs dfs -setrep [-R] [-w] <numReplicas> <path> >>>


@ -21,6 +21,8 @@ package org.apache.hadoop.fs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
@ -33,6 +35,7 @@ import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import static org.apache.hadoop.fs.Options.ChecksumOpt;
import static org.apache.hadoop.fs.Options.CreateOpts;
@ -165,6 +168,20 @@ public class TestHarFileSystem {
String snapshotNewName) throws IOException;
public void deleteSnapshot(Path path, String snapshotName)
throws IOException;
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException;
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException;
public void removeDefaultAcl(Path path) throws IOException;
public void removeAcl(Path path) throws IOException;
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException;
public AclStatus getAclStatus(Path path) throws IOException;
}
@Test


@ -0,0 +1,210 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import static org.junit.Assert.*;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests covering basic functionality of the ACL objects.
*/
public class TestAcl {
private static AclEntry ENTRY1, ENTRY2, ENTRY3, ENTRY4, ENTRY5, ENTRY6,
ENTRY7, ENTRY8, ENTRY9, ENTRY10, ENTRY11, ENTRY12, ENTRY13;
private static AclStatus STATUS1, STATUS2, STATUS3, STATUS4;
@BeforeClass
public static void setUp() {
// named user
AclEntry.Builder aclEntryBuilder = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setName("user1")
.setPermission(FsAction.ALL);
ENTRY1 = aclEntryBuilder.build();
ENTRY2 = aclEntryBuilder.build();
// named group
ENTRY3 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setName("group2")
.setPermission(FsAction.READ_WRITE)
.build();
// default other
ENTRY4 = new AclEntry.Builder()
.setType(AclEntryType.OTHER)
.setPermission(FsAction.NONE)
.setScope(AclEntryScope.DEFAULT)
.build();
// owner
ENTRY5 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setPermission(FsAction.ALL)
.build();
// default named group
ENTRY6 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setName("group3")
.setPermission(FsAction.READ_WRITE)
.setScope(AclEntryScope.DEFAULT)
.build();
// other
ENTRY7 = new AclEntry.Builder()
.setType(AclEntryType.OTHER)
.setPermission(FsAction.NONE)
.build();
// default named user
ENTRY8 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setName("user3")
.setPermission(FsAction.ALL)
.setScope(AclEntryScope.DEFAULT)
.build();
// mask
ENTRY9 = new AclEntry.Builder()
.setType(AclEntryType.MASK)
.setPermission(FsAction.READ)
.build();
// default mask
ENTRY10 = new AclEntry.Builder()
.setType(AclEntryType.MASK)
.setPermission(FsAction.READ_EXECUTE)
.setScope(AclEntryScope.DEFAULT)
.build();
// group
ENTRY11 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setPermission(FsAction.READ)
.build();
// default group
ENTRY12 = new AclEntry.Builder()
.setType(AclEntryType.GROUP)
.setPermission(FsAction.READ)
.setScope(AclEntryScope.DEFAULT)
.build();
// default owner
ENTRY13 = new AclEntry.Builder()
.setType(AclEntryType.USER)
.setPermission(FsAction.ALL)
.setScope(AclEntryScope.DEFAULT)
.build();
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder()
.owner("owner1")
.group("group1")
.addEntry(ENTRY1)
.addEntry(ENTRY3)
.addEntry(ENTRY4);
STATUS1 = aclStatusBuilder.build();
STATUS2 = aclStatusBuilder.build();
STATUS3 = new AclStatus.Builder()
.owner("owner2")
.group("group2")
.stickyBit(true)
.build();
STATUS4 = new AclStatus.Builder()
.addEntry(ENTRY1)
.addEntry(ENTRY3)
.addEntry(ENTRY4)
.addEntry(ENTRY5)
.addEntry(ENTRY6)
.addEntry(ENTRY7)
.addEntry(ENTRY8)
.addEntry(ENTRY9)
.addEntry(ENTRY10)
.addEntry(ENTRY11)
.addEntry(ENTRY12)
.addEntry(ENTRY13)
.build();
}
@Test
public void testEntryEquals() {
assertNotSame(ENTRY1, ENTRY2);
assertNotSame(ENTRY1, ENTRY3);
assertNotSame(ENTRY1, ENTRY4);
assertNotSame(ENTRY2, ENTRY3);
assertNotSame(ENTRY2, ENTRY4);
assertNotSame(ENTRY3, ENTRY4);
assertEquals(ENTRY1, ENTRY1);
assertEquals(ENTRY2, ENTRY2);
assertEquals(ENTRY1, ENTRY2);
assertEquals(ENTRY2, ENTRY1);
assertFalse(ENTRY1.equals(ENTRY3));
assertFalse(ENTRY1.equals(ENTRY4));
assertFalse(ENTRY3.equals(ENTRY4));
assertFalse(ENTRY1.equals(null));
assertFalse(ENTRY1.equals(new Object()));
}
@Test
public void testEntryHashCode() {
assertEquals(ENTRY1.hashCode(), ENTRY2.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY3.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY4.hashCode());
assertFalse(ENTRY3.hashCode() == ENTRY4.hashCode());
}
@Test
public void testEntryScopeIsAccessIfUnspecified() {
assertEquals(AclEntryScope.ACCESS, ENTRY1.getScope());
assertEquals(AclEntryScope.ACCESS, ENTRY2.getScope());
assertEquals(AclEntryScope.ACCESS, ENTRY3.getScope());
assertEquals(AclEntryScope.DEFAULT, ENTRY4.getScope());
}
@Test
public void testStatusEquals() {
assertNotSame(STATUS1, STATUS2);
assertNotSame(STATUS1, STATUS3);
assertNotSame(STATUS2, STATUS3);
assertEquals(STATUS1, STATUS1);
assertEquals(STATUS2, STATUS2);
assertEquals(STATUS1, STATUS2);
assertEquals(STATUS2, STATUS1);
assertFalse(STATUS1.equals(STATUS3));
assertFalse(STATUS2.equals(STATUS3));
assertFalse(STATUS1.equals(null));
assertFalse(STATUS1.equals(new Object()));
}
@Test
public void testStatusHashCode() {
assertEquals(STATUS1.hashCode(), STATUS2.hashCode());
assertFalse(STATUS1.hashCode() == STATUS3.hashCode());
}
@Test
public void testToString() {
assertEquals("user:user1:rwx", ENTRY1.toString());
assertEquals("user:user1:rwx", ENTRY2.toString());
assertEquals("group:group2:rw-", ENTRY3.toString());
assertEquals("default:other::---", ENTRY4.toString());
assertEquals(
"owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}",
STATUS1.toString());
assertEquals(
"owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}",
STATUS2.toString());
assertEquals(
"owner: owner2, group: group2, acl: {entries: [], stickyBit: true}",
STATUS3.toString());
}
}

View File

@ -54,7 +54,7 @@ public class TestFsPermission extends TestCase {
* the expected values back out for all combinations * the expected values back out for all combinations
*/ */
public void testConvertingPermissions() { public void testConvertingPermissions() {
for(short s = 0; s < 01777; s++) { for(short s = 0; s <= 01777; s++) {
assertEquals(s, new FsPermission(s).toShort()); assertEquals(s, new FsPermission(s).toShort());
} }
@ -64,10 +64,12 @@ public class TestFsPermission extends TestCase {
for(FsAction u : FsAction.values()) { for(FsAction u : FsAction.values()) {
for(FsAction g : FsAction.values()) { for(FsAction g : FsAction.values()) {
for(FsAction o : FsAction.values()) { for(FsAction o : FsAction.values()) {
// Cover constructor with sticky bit.
FsPermission f = new FsPermission(u, g, o, sb); FsPermission f = new FsPermission(u, g, o, sb);
assertEquals(s, f.toShort()); assertEquals(s, f.toShort());
FsPermission f2 = new FsPermission(f); FsPermission f2 = new FsPermission(f);
assertEquals(s, f2.toShort()); assertEquals(s, f2.toShort());
s++; s++;
} }
} }
@ -75,48 +77,57 @@ public class TestFsPermission extends TestCase {
} }
} }
public void testStickyBitToString() { public void testSpecialBitsToString() {
// Check that every permission has its sticky bit represented correctly for (boolean sb : new boolean[] { false, true }) {
for(boolean sb : new boolean [] { false, true }) { for (FsAction u : FsAction.values()) {
for(FsAction u : FsAction.values()) { for (FsAction g : FsAction.values()) {
for(FsAction g : FsAction.values()) { for (FsAction o : FsAction.values()) {
for(FsAction o : FsAction.values()) {
FsPermission f = new FsPermission(u, g, o, sb); FsPermission f = new FsPermission(u, g, o, sb);
if(f.getStickyBit() && f.getOtherAction().implies(EXECUTE)) String fString = f.toString();
assertEquals('t', f.toString().charAt(8));
else if(f.getStickyBit() && !f.getOtherAction().implies(EXECUTE)) // Check that sticky bit is represented correctly.
assertEquals('T', f.toString().charAt(8)); if (f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
else if(!f.getStickyBit() && f.getOtherAction().implies(EXECUTE)) assertEquals('t', fString.charAt(8));
assertEquals('x', f.toString().charAt(8)); else if (f.getStickyBit() && !f.getOtherAction().implies(EXECUTE))
assertEquals('T', fString.charAt(8));
else if (!f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
assertEquals('x', fString.charAt(8));
else else
assertEquals('-', f.toString().charAt(8)); assertEquals('-', fString.charAt(8));
assertEquals(9, fString.length());
} }
} }
} }
} }
} }
public void testFsPermission() { public void testFsPermission() {
String symbolic = "-rwxrwxrwx"; String symbolic = "-rwxrwxrwx";
StringBuilder b = new StringBuilder("-123456789");
for(int i = 0; i < (1<<9); i++) { for(int i = 0; i < (1 << 10); i++) {
for(int j = 1; j < 10; j++) { StringBuilder b = new StringBuilder("----------");
b.setCharAt(j, '-'); String binary = String.format("%11s", Integer.toBinaryString(i));
String permBinary = binary.substring(2, binary.length());
int len = permBinary.length();
for(int j = 0; j < len; j++) {
if (permBinary.charAt(j) == '1') {
int k = 9 - (len - 1 - j);
b.setCharAt(k, symbolic.charAt(k));
} }
String binary = Integer.toBinaryString(i);
int len = binary.length();
for(int j = 0; j < len; j++) {
if (binary.charAt(j) == '1') {
int k = 9 - (len - 1 - j);
b.setCharAt(k, symbolic.charAt(k));
}
}
assertEquals(i, FsPermission.valueOf(b.toString()).toShort());
} }
// Check for sticky bit.
if (binary.charAt(1) == '1') {
char replacement = b.charAt(9) == 'x' ? 't' : 'T';
b.setCharAt(9, replacement);
}
assertEquals(i, FsPermission.valueOf(b.toString()).toShort());
} }
}
public void testUMaskParser() throws IOException { public void testUMaskParser() throws IOException {
Configuration conf = new Configuration(); Configuration conf = new Configuration();

View File

@ -0,0 +1,240 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcNoSuchMethodException;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Before;
import org.junit.Test;
public class TestAclCommands {
private Configuration conf = null;
@Before
public void setup() throws IOException {
conf = new Configuration();
}
@Test
public void testGetfaclValidations() throws Exception {
assertFalse("getfacl should fail without path",
0 == runCommand(new String[] { "-getfacl" }));
assertFalse("getfacl should fail with extra argument",
0 == runCommand(new String[] { "-getfacl", "/test", "extraArg" }));
}
@Test
public void testSetfaclValidations() throws Exception {
assertFalse("setfacl should fail without path",
0 == runCommand(new String[] { "-setfacl" }));
assertFalse("setfacl should fail without aclSpec",
0 == runCommand(new String[] { "-setfacl", "-m", "/path" }));
assertFalse("setfacl should fail with conflicting options",
0 == runCommand(new String[] { "-setfacl", "-m", "/path" }));
assertFalse("setfacl should fail with extra arguments",
0 == runCommand(new String[] { "-setfacl", "/path", "extra" }));
assertFalse("setfacl should fail with extra arguments",
0 == runCommand(new String[] { "-setfacl", "--set",
"default:user::rwx", "/path", "extra" }));
assertFalse("setfacl should fail with permissions for -x",
0 == runCommand(new String[] { "-setfacl", "-x", "user:user1:rwx",
"/path" }));
assertFalse("setfacl should fail ACL spec missing",
0 == runCommand(new String[] { "-setfacl", "-m",
"", "/path" }));
}
@Test
public void testMultipleAclSpecParsing() throws Exception {
List<AclEntry> parsedList = AclEntry.parseAclSpec(
"group::rwx,user:user1:rwx,user:user2:rw-,"
+ "group:group1:rw-,default:group:group1:rw-", true);
AclEntry basicAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.ALL).build();
AclEntry user1Acl = new AclEntry.Builder().setType(AclEntryType.USER)
.setPermission(FsAction.ALL).setName("user1").build();
AclEntry user2Acl = new AclEntry.Builder().setType(AclEntryType.USER)
.setPermission(FsAction.READ_WRITE).setName("user2").build();
AclEntry group1Acl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.READ_WRITE).setName("group1").build();
AclEntry defaultAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setPermission(FsAction.READ_WRITE).setName("group1")
.setScope(AclEntryScope.DEFAULT).build();
List<AclEntry> expectedList = new ArrayList<AclEntry>();
expectedList.add(basicAcl);
expectedList.add(user1Acl);
expectedList.add(user2Acl);
expectedList.add(group1Acl);
expectedList.add(defaultAcl);
assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
@Test
public void testMultipleAclSpecParsingWithoutPermissions() throws Exception {
List<AclEntry> parsedList = AclEntry.parseAclSpec(
"user::,user:user1:,group::,group:group1:,mask::,other::,"
+ "default:user:user1::,default:mask::", false);
AclEntry owner = new AclEntry.Builder().setType(AclEntryType.USER).build();
AclEntry namedUser = new AclEntry.Builder().setType(AclEntryType.USER)
.setName("user1").build();
AclEntry group = new AclEntry.Builder().setType(AclEntryType.GROUP).build();
AclEntry namedGroup = new AclEntry.Builder().setType(AclEntryType.GROUP)
.setName("group1").build();
AclEntry mask = new AclEntry.Builder().setType(AclEntryType.MASK).build();
AclEntry other = new AclEntry.Builder().setType(AclEntryType.OTHER).build();
AclEntry defaultUser = new AclEntry.Builder()
.setScope(AclEntryScope.DEFAULT).setType(AclEntryType.USER)
.setName("user1").build();
AclEntry defaultMask = new AclEntry.Builder()
.setScope(AclEntryScope.DEFAULT).setType(AclEntryType.MASK).build();
List<AclEntry> expectedList = new ArrayList<AclEntry>();
expectedList.add(owner);
expectedList.add(namedUser);
expectedList.add(group);
expectedList.add(namedGroup);
expectedList.add(mask);
expectedList.add(other);
expectedList.add(defaultUser);
expectedList.add(defaultMask);
assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
@Test
public void testLsNoRpcForGetAclStatus() throws Exception {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
conf.setBoolean("stubfs.noRpcForGetAclStatus", true);
assertEquals("ls must succeed even if getAclStatus RPC does not exist.",
0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
@Test
public void testLsAclsUnsupported() throws Exception {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
assertEquals("ls must succeed even if FileSystem does not implement ACLs.",
0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
public static class StubFileSystem extends FileSystem {
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return null;
}
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return false;
}
public AclStatus getAclStatus(Path path) throws IOException {
if (getConf().getBoolean("stubfs.noRpcForGetAclStatus", false)) {
throw new RemoteException(RpcNoSuchMethodException.class.getName(),
"test exception");
}
return super.getAclStatus(path);
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
if (f.isRoot()) {
return new FileStatus(0, true, 0, 0, 0, f);
}
return null;
}
@Override
public URI getUri() {
return URI.create("stubfs:///");
}
@Override
public Path getWorkingDirectory() {
return null;
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE,
FsAction.READ_EXECUTE);
Path path = new Path("/foo");
FileStatus stat = new FileStatus(1000, true, 3, 1000, 0, 0, perm, "owner",
"group", path);
return new FileStatus[] { stat };
}
@Override
public boolean mkdirs(Path f, FsPermission permission)
throws IOException {
return false;
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return null;
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
return false;
}
@Override
public void setWorkingDirectory(Path dir) {
}
}
private int runCommand(String[] commands) throws Exception {
return ToolRunner.run(conf, new FsShell(), commands);
}
}

View File

@ -20,6 +20,8 @@ package org.apache.hadoop.fs.viewfs;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
@ -29,6 +31,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.viewfs.ChRootedFileSystem; import org.apache.hadoop.fs.viewfs.ChRootedFileSystem;
import org.junit.After; import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
@ -354,6 +357,44 @@ public class TestChRootedFileSystem {
new ChRootedFileSystem(chrootUri, conf); new ChRootedFileSystem(chrootUri, conf);
} }
/**
* Tests that ChRootedFileSystem delegates calls for every ACL method to the
* underlying FileSystem with all Path arguments translated as required to
* enforce chroot.
*/
@Test
public void testAclMethodsPathTranslation() throws IOException {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem)chrootFs.getRawFileSystem())
.getRawFileSystem();
Path chrootPath = new Path("/c");
Path rawPath = new Path("/a/b/c");
List<AclEntry> entries = Collections.emptyList();
chrootFs.modifyAclEntries(chrootPath, entries);
verify(mockFs).modifyAclEntries(rawPath, entries);
chrootFs.removeAclEntries(chrootPath, entries);
verify(mockFs).removeAclEntries(rawPath, entries);
chrootFs.removeDefaultAcl(chrootPath);
verify(mockFs).removeDefaultAcl(rawPath);
chrootFs.removeAcl(chrootPath);
verify(mockFs).removeAcl(rawPath);
chrootFs.setAcl(chrootPath, entries);
verify(mockFs).setAcl(rawPath, entries);
chrootFs.getAclStatus(chrootPath);
verify(mockFs).getAclStatus(rawPath);
}
static class MockFileSystem extends FilterFileSystem { static class MockFileSystem extends FilterFileSystem {
MockFileSystem() { MockFileSystem() {
super(mock(FileSystem.class)); super(mock(FileSystem.class));
@ -361,4 +402,4 @@ public class TestChRootedFileSystem {
@Override @Override
public void initialize(URI name, Configuration conf) throws IOException {} public void initialize(URI name, Configuration conf) throws IOException {}
} }
} }

View File

@ -20,14 +20,19 @@ package org.apache.hadoop.fs.viewfs;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.viewfs.TestChRootedFileSystem.MockFileSystem;
import org.junit.*; import org.junit.*;
import static org.junit.Assert.*; import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
/** /**
* Verify that viewfs propagates certain methods to the underlying fs * Verify that viewfs propagates certain methods to the underlying fs
@ -57,6 +62,15 @@ public class TestViewFileSystemDelegation { //extends ViewFileSystemTestSetup {
return fs; return fs;
} }
private static FileSystem setupMockFileSystem(Configuration conf, URI uri)
throws Exception {
String scheme = uri.getScheme();
conf.set("fs." + scheme + ".impl", MockFileSystem.class.getName());
FileSystem fs = FileSystem.get(uri, conf);
ConfigUtil.addLink(conf, "/mounts/" + scheme, uri);
return ((MockFileSystem)fs).getRawFileSystem();
}
@Test @Test
public void testSanity() { public void testSanity() {
assertEquals("fs1:/", fs1.getUri().toString()); assertEquals("fs1:/", fs1.getUri().toString());
@ -69,6 +83,55 @@ public class TestViewFileSystemDelegation { //extends ViewFileSystemTestSetup {
checkVerifyChecksum(true); checkVerifyChecksum(true);
} }
/**
* Tests that ViewFileSystem dispatches calls for every ACL method through the
* mount table to the correct underlying FileSystem with all Path arguments
* translated as required.
*/
@Test
public void testAclMethods() throws Exception {
Configuration conf = ViewFileSystemTestSetup.createConfig();
FileSystem mockFs1 = setupMockFileSystem(conf, new URI("mockfs1:/"));
FileSystem mockFs2 = setupMockFileSystem(conf, new URI("mockfs2:/"));
FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
Path viewFsPath1 = new Path("/mounts/mockfs1/a/b/c");
Path mockFsPath1 = new Path("/a/b/c");
Path viewFsPath2 = new Path("/mounts/mockfs2/d/e/f");
Path mockFsPath2 = new Path("/d/e/f");
List<AclEntry> entries = Collections.emptyList();
viewFs.modifyAclEntries(viewFsPath1, entries);
verify(mockFs1).modifyAclEntries(mockFsPath1, entries);
viewFs.modifyAclEntries(viewFsPath2, entries);
verify(mockFs2).modifyAclEntries(mockFsPath2, entries);
viewFs.removeAclEntries(viewFsPath1, entries);
verify(mockFs1).removeAclEntries(mockFsPath1, entries);
viewFs.removeAclEntries(viewFsPath2, entries);
verify(mockFs2).removeAclEntries(mockFsPath2, entries);
viewFs.removeDefaultAcl(viewFsPath1);
verify(mockFs1).removeDefaultAcl(mockFsPath1);
viewFs.removeDefaultAcl(viewFsPath2);
verify(mockFs2).removeDefaultAcl(mockFsPath2);
viewFs.removeAcl(viewFsPath1);
verify(mockFs1).removeAcl(mockFsPath1);
viewFs.removeAcl(viewFsPath2);
verify(mockFs2).removeAcl(mockFsPath2);
viewFs.setAcl(viewFsPath1, entries);
verify(mockFs1).setAcl(mockFsPath1, entries);
viewFs.setAcl(viewFsPath2, entries);
verify(mockFs2).setAcl(mockFsPath2, entries);
viewFs.getAclStatus(viewFsPath1);
verify(mockFs1).getAclStatus(mockFsPath1);
viewFs.getAclStatus(viewFsPath2);
verify(mockFs2).getAclStatus(mockFsPath2);
}
void checkVerifyChecksum(boolean flag) { void checkVerifyChecksum(boolean flag) {
viewFs.setVerifyChecksum(flag); viewFs.setVerifyChecksum(flag);
assertEquals(flag, fs1.getVerifyChecksum()); assertEquals(flag, fs1.getVerifyChecksum());

View File

@ -13,6 +13,9 @@ Trunk (Unreleased)
HDFS-3125. Add JournalService to enable Journal Daemon. (suresh) HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
HDFS-4685. Implementation of ACLs in HDFS. (See breakdown of tasks below for
features and contributors)
IMPROVEMENTS IMPROVEMENTS
HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common. HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
@ -259,6 +262,82 @@ Trunk (Unreleased)
HDFS-5794. Fix the inconsistency of layout version number of HDFS-5794. Fix the inconsistency of layout version number of
ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9) ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
BREAKDOWN OF HDFS-4685 SUBTASKS AND RELATED JIRAS
HDFS-5596. Implement RPC stubs. (Haohui Mai via cnauroth)
HDFS-5685. Implement ACL as a INode feature. (Haohui Mai via cnauroth)
HDFS-5618. NameNode: persist ACLs in fsimage. (Haohui Mai via cnauroth)
HDFS-5619. NameNode: record ACL modifications to edit log.
(Haohui Mai via cnauroth)
HDFS-5673. Implement logic for modification of ACLs. (cnauroth)
HDFS-5758. NameNode: complete implementation of inode modifications for
ACLs. (Chris Nauroth via wheat9)
HDFS-5612. NameNode: change all permission checks to enforce ACLs in
addition to permissions. (Chris Nauroth via wheat9)
HDFS-5613. NameNode: implement handling of ACLs in combination with
symlinks. (Chris Nauroth via wheat9)
HDFS-5615. NameNode: implement handling of ACLs in combination with sticky
bit. (Chris Nauroth via wheat9)
HDFS-5702. FsShell Cli: Add XML based End-to-End test for getfacl and
setfacl commands. (Vinay via cnauroth)
HDFS-5608. WebHDFS: implement ACL APIs.
(Sachin Jose and Renil Joseph via cnauroth)
HDFS-5614. NameNode: implement handling of ACLs in combination with
snapshots. (cnauroth)
HDFS-5858. Refactor common ACL test cases to be run through multiple
FileSystem implementations. (cnauroth)
HDFS-5860. Refactor INodeDirectory getDirectoryXFeature methods to use
common getFeature helper method. (Jing Zhao via cnauroth)
HDFS-5861. Add CLI test for Ls output for extended ACL marker.
(Vinay via cnauroth)
HDFS-5616. NameNode: implement default ACL handling. (cnauroth)
HDFS-5899. Add configuration flag to disable/enable support for ACLs.
(cnauroth)
HDFS-5914. Incorporate ACLs with the changes from HDFS-5698.
(Haohui Mai via cnauroth)
HDFS-5625. Write end user documentation for HDFS ACLs. (cnauroth)
HDFS-5925. ACL configuration flag must only reject ACL API calls, not ACLs
present in fsimage or edits. (cnauroth)
HDFS-5923. Do not persist the ACL bit in the FsPermission.
(Haohui Mai via cnauroth)
HDFS-5933. Optimize the FSImage layout for ACLs (Haohui Mai via cnauroth)
HDFS-5932. Ls should display the ACL bit (Chris Nauroth via wheat9)
HDFS-5937. Fix TestOfflineEditsViewer on HDFS-4685 branch. (cnauroth)
HDFS-5737. Replacing only the default ACL can fail to copy unspecified base
entries from the access ACL. (cnauroth)
HDFS-5739. ACL RPC must allow null name or unspecified permissions in ACL
entries. (cnauroth)
HDFS-5799. Make audit logging consistent across ACL APIs. (cnauroth)
HDFS-5849. Removing ACL from an inode fails if it has only a default ACL.
(cnauroth)
Release 2.5.0 - UNRELEASED Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -423,7 +502,8 @@ Release 2.4.0 - UNRELEASED
HDFS-5716. Allow WebHDFS to use pluggable authentication filter HDFS-5716. Allow WebHDFS to use pluggable authentication filter
(Haohui Mai via brandonli) (Haohui Mai via brandonli)
HDFS-5953. TestBlockReaderFactory fails in trunk. (Akira Ajisaka via wang) HDFS-5953. TestBlockReaderFactory fails if libhadoop.so has not been built.
(Akira Ajisaka via wang)
HDFS-5759. Web UI does not show up during the period of loading FSImage. HDFS-5759. Web UI does not show up during the period of loading FSImage.
(Haohui Mai via Arpit Agarwal) (Haohui Mai via Arpit Agarwal)
@ -439,6 +519,8 @@ Release 2.4.0 - UNRELEASED
URLConnectionFactory which does not import SSL certificates. (Haohui Mai via URLConnectionFactory which does not import SSL certificates. (Haohui Mai via
jing9) jing9)
HDFS-5961. OIV cannot load fsimages containing a symbolic link. (kihwal)
BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9) HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)

View File

@ -482,6 +482,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<includes> <includes>
<include>ClientNamenodeProtocol.proto</include> <include>ClientNamenodeProtocol.proto</include>
<include>NamenodeProtocol.proto</include> <include>NamenodeProtocol.proto</include>
<include>acl.proto</include>
</includes> </includes>
</source> </source>
<output>${project.build.directory}/generated-sources/java</output> <output>${project.build.directory}/generated-sources/java</output>

View File

@ -109,9 +109,12 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.VolumeId; import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer; import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@ -2607,6 +2610,95 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
return clientContext; return clientContext;
} }
void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
checkOpen();
try {
namenode.modifyAclEntries(src, aclSpec);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
checkOpen();
try {
namenode.removeAclEntries(src, aclSpec);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
void removeDefaultAcl(String src) throws IOException {
checkOpen();
try {
namenode.removeDefaultAcl(src);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
void removeAcl(String src) throws IOException {
checkOpen();
try {
namenode.removeAcl(src);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
checkOpen();
try {
namenode.setAcl(src, aclSpec);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
SafeModeException.class,
SnapshotAccessControlException.class,
UnresolvedPathException.class);
}
}
AclStatus getAclStatus(String src) throws IOException {
checkOpen();
try {
return namenode.getAclStatus(src);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
AclException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
@Override // RemotePeerFactory @Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr) throws IOException { public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
Peer peer = null; Peer peer = null;

View File

@ -72,6 +72,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT = 3; public static final int DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT = 3;
public static final String DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH = "dfs.client.retry.interval-ms.get-last-block-length"; public static final String DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH = "dfs.client.retry.interval-ms.get-last-block-length";
public static final int DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT = 4000; public static final int DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT = 4000;
public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
"^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
// HA related configuration // HA related configuration
public static final String DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider"; public static final String DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider";
@ -184,6 +186,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true; public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
public static final String DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup"; public static final String DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup"; public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
public static final String DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled";
public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false;
public static final String DFS_ADMIN = "dfs.cluster.administrators"; public static final String DFS_ADMIN = "dfs.cluster.administrators";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource"; public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml"; public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
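The dfs.namenode.acls.enabled key added above gates every ACL operation on the NameNode. A minimal sketch of setting it through the Configuration API, purely illustrative; in a real deployment the flag would be set in hdfs-site.xml before the NameNode starts:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class AclConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Defaults to false, so the NameNode rejects ACL operations until enabled.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    System.out.println("ACLs enabled: "
        + conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,
            DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT));
  }
}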

View File

@ -54,6 +54,8 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.VolumeId; import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@ -1749,4 +1751,130 @@ public class DistributedFileSystem extends FileSystem {
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException { public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
return dfs.listCachePools(); return dfs.listCachePools();
} }
/**
* {@inheritDoc}
*/
@Override
public void modifyAclEntries(Path path, final List<AclEntry> aclSpec)
throws IOException {
Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.modifyAclEntries(getPathName(p), aclSpec);
return null;
}
@Override
public Void next(final FileSystem fs, final Path p) throws IOException {
fs.modifyAclEntries(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public void removeAclEntries(Path path, final List<AclEntry> aclSpec)
throws IOException {
Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.removeAclEntries(getPathName(p), aclSpec);
return null;
}
@Override
public Void next(final FileSystem fs, final Path p) throws IOException {
fs.removeAclEntries(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public void removeDefaultAcl(Path path) throws IOException {
final Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.removeDefaultAcl(getPathName(p));
return null;
}
@Override
public Void next(final FileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.removeDefaultAcl(p);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public void removeAcl(Path path) throws IOException {
final Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.removeAcl(getPathName(p));
return null;
}
@Override
public Void next(final FileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.removeAcl(p);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public void setAcl(Path path, final List<AclEntry> aclSpec) throws IOException {
Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.setAcl(getPathName(p), aclSpec);
return null;
}
@Override
public Void next(final FileSystem fs, final Path p) throws IOException {
fs.setAcl(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
/**
* {@inheritDoc}
*/
@Override
public AclStatus getAclStatus(Path path) throws IOException {
final Path absF = fixRelativePart(path);
return new FileSystemLinkResolver<AclStatus>() {
@Override
public AclStatus doCall(final Path p) throws IOException {
return dfs.getAclStatus(getPathName(p));
}
@Override
public AclStatus next(final FileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getAclStatus(p);
}
}.resolve(this, absF);
}
} }
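Each override above resolves relative paths and symlinks before delegating to DFSClient. A hedged client-side sketch, assuming a cluster with dfs.namenode.acls.enabled set to true and a hypothetical directory /tmp/acl-demo:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

public class DfsAclSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());  // expected to be a DistributedFileSystem
    Path dir = new Path("/tmp/acl-demo");                 // hypothetical directory
    List<AclEntry> aclSpec = Arrays.asList(
        new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("user1")
            .setPermission(FsAction.READ_WRITE)
            .build(),
        new AclEntry.Builder()
            .setScope(AclEntryScope.DEFAULT)
            .setType(AclEntryType.GROUP)
            .setName("group1")
            .setPermission(FsAction.READ_EXECUTE)
            .build());
    fs.modifyAclEntries(dir, aclSpec);       // merged into the existing ACL
    AclStatus status = fs.getAclStatus(dir);
    for (AclEntry e : status.getEntries()) {
      System.out.println(e);                 // e.g. user:user1:rw-
    }
    fs.removeDefaultAcl(dir);                // clears only the default entries
  }
}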

View File

@ -0,0 +1,39 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Indicates a failure manipulating an ACL.
*/
@InterfaceAudience.Private
public class AclException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Creates a new AclException.
*
* @param message String message
*/
public AclException(String message) {
super(message);
}
}

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.protocol;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
@ -34,6 +35,8 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@ -1194,4 +1197,49 @@ public interface ClientProtocol {
@Idempotent @Idempotent
public BatchedEntries<CachePoolEntry> listCachePools(String prevPool) public BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
throws IOException; throws IOException;
/**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*/
@Idempotent
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException;
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*/
@Idempotent
public void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException;
/**
* Removes all default ACL entries from files and directories.
*/
@Idempotent
public void removeDefaultAcl(String src) throws IOException;
/**
* Removes all but the base ACL entries of files and directories. The entries
* for user, group, and others are retained for compatibility with permission
* bits.
*/
@Idempotent
public void removeAcl(String src) throws IOException;
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*/
@Idempotent
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException;
/**
* Gets the ACLs of files and directories.
*/
@Idempotent
public AclStatus getAclStatus(String src) throws IOException;
} }

View File

@ -122,7 +122,11 @@ public class LayoutVersion {
+ " Use distinct StorageUuid per storage directory."), + " Use distinct StorageUuid per storage directory."),
ADD_LAYOUT_FLAGS(-50, "Add support for layout flags."), ADD_LAYOUT_FLAGS(-50, "Add support for layout flags."),
CACHING(-51, "Support for cache pools and path-based caching"), CACHING(-51, "Support for cache pools and path-based caching"),
PROTOBUF_FORMAT(-52, "Use protobuf to serialize FSImage"); // Hadoop 2.4.0
PROTOBUF_FORMAT(-52, "Use protobuf to serialize FSImage"),
EXTENDED_ACL(-53, "Extended ACL"),
RESERVED_REL2_4_0(-54, -51, "Reserved for release 2.4.0", true,
PROTOBUF_FORMAT, EXTENDED_ACL);
private final FeatureInfo info; private final FeatureInfo info;

View File

@ -38,6 +38,18 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@ -273,6 +285,24 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
VOID_SETBALANCERBANDWIDTH_RESPONSE = VOID_SETBALANCERBANDWIDTH_RESPONSE =
SetBalancerBandwidthResponseProto.newBuilder().build(); SetBalancerBandwidthResponseProto.newBuilder().build();
private static final SetAclResponseProto
VOID_SETACL_RESPONSE = SetAclResponseProto.getDefaultInstance();
private static final ModifyAclEntriesResponseProto
VOID_MODIFYACLENTRIES_RESPONSE = ModifyAclEntriesResponseProto
.getDefaultInstance();
private static final RemoveAclEntriesResponseProto
VOID_REMOVEACLENTRIES_RESPONSE = RemoveAclEntriesResponseProto
.getDefaultInstance();
private static final RemoveDefaultAclResponseProto
VOID_REMOVEDEFAULTACL_RESPONSE = RemoveDefaultAclResponseProto
.getDefaultInstance();
private static final RemoveAclResponseProto
VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance();
/** /**
* Constructor * Constructor
* *
@ -1162,4 +1192,73 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e); throw new ServiceException(e);
} }
} }
@Override
public ModifyAclEntriesResponseProto modifyAclEntries(
RpcController controller, ModifyAclEntriesRequestProto req)
throws ServiceException {
try {
server.modifyAclEntries(req.getSrc(), PBHelper.convertAclEntry(req.getAclSpecList()));
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_MODIFYACLENTRIES_RESPONSE;
}
@Override
public RemoveAclEntriesResponseProto removeAclEntries(
RpcController controller, RemoveAclEntriesRequestProto req)
throws ServiceException {
try {
server.removeAclEntries(req.getSrc(),
PBHelper.convertAclEntry(req.getAclSpecList()));
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REMOVEACLENTRIES_RESPONSE;
}
@Override
public RemoveDefaultAclResponseProto removeDefaultAcl(
RpcController controller, RemoveDefaultAclRequestProto req)
throws ServiceException {
try {
server.removeDefaultAcl(req.getSrc());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REMOVEDEFAULTACL_RESPONSE;
}
@Override
public RemoveAclResponseProto removeAcl(RpcController controller,
RemoveAclRequestProto req) throws ServiceException {
try {
server.removeAcl(req.getSrc());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REMOVEACL_RESPONSE;
}
@Override
public SetAclResponseProto setAcl(RpcController controller,
SetAclRequestProto req) throws ServiceException {
try {
server.setAcl(req.getSrc(), PBHelper.convertAclEntry(req.getAclSpecList()));
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_SETACL_RESPONSE;
}
@Override
public GetAclStatusResponseProto getAclStatus(RpcController controller,
GetAclStatusRequestProto req) throws ServiceException {
try {
return PBHelper.convert(server.getAclStatus(req.getSrc()));
} catch (IOException e) {
throw new ServiceException(e);
}
}
} }

View File

@ -22,6 +22,7 @@ import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
@ -34,6 +35,8 @@ import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@ -57,6 +60,12 @@ import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.RemoveDefaultAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.SetAclRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto;
@ -1183,4 +1192,76 @@ public class ClientNamenodeProtocolTranslatorPB implements
throw ProtobufHelper.getRemoteException(e); throw ProtobufHelper.getRemoteException(e);
} }
} }
@Override
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
ModifyAclEntriesRequestProto req = ModifyAclEntriesRequestProto
.newBuilder().setSrc(src)
.addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)).build();
try {
rpcProxy.modifyAclEntries(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
RemoveAclEntriesRequestProto req = RemoveAclEntriesRequestProto
.newBuilder().setSrc(src)
.addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)).build();
try {
rpcProxy.removeAclEntries(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void removeDefaultAcl(String src) throws IOException {
RemoveDefaultAclRequestProto req = RemoveDefaultAclRequestProto
.newBuilder().setSrc(src).build();
try {
rpcProxy.removeDefaultAcl(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void removeAcl(String src) throws IOException {
RemoveAclRequestProto req = RemoveAclRequestProto.newBuilder()
.setSrc(src).build();
try {
rpcProxy.removeAcl(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
SetAclRequestProto req = SetAclRequestProto.newBuilder()
.setSrc(src)
.addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec))
.build();
try {
rpcProxy.setAcl(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public AclStatus getAclStatus(String src) throws IOException {
GetAclStatusRequestProto req = GetAclStatusRequestProto.newBuilder()
.setSrc(src).build();
try {
return PBHelper.convert(rpcProxy.getAclStatus(null, req));
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
} }

View File

@ -32,6 +32,11 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
@ -65,6 +70,12 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
@ -199,6 +210,13 @@ public class PBHelper {
RegisterCommandProto.newBuilder().build(); RegisterCommandProto.newBuilder().build();
private static final RegisterCommand REG_CMD = new RegisterCommand(); private static final RegisterCommand REG_CMD = new RegisterCommand();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES =
AclEntryScope.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES =
AclEntryType.values();
private static final FsAction[] FSACTION_VALUES =
FsAction.values();
private PBHelper() { private PBHelper() {
/** Hidden constructor */ /** Hidden constructor */
} }
@ -207,6 +225,10 @@ public class PBHelper {
return ByteString.copyFrom(bytes); return ByteString.copyFrom(bytes);
} }
private static <T extends Enum<T>, U extends Enum<U>> U castEnum(T from, U[] to) {
return to[from.ordinal()];
}
public static NamenodeRole convert(NamenodeRoleProto role) { public static NamenodeRole convert(NamenodeRoleProto role) {
switch (role) { switch (role) {
case NAMENODE: case NAMENODE:
@ -760,8 +782,9 @@ public class PBHelper {
return REG_CMD; return REG_CMD;
case BlockIdCommand: case BlockIdCommand:
return PBHelper.convert(proto.getBlkIdCmd()); return PBHelper.convert(proto.getBlkIdCmd());
default:
return null;
} }
return null;
} }
public static BalancerBandwidthCommandProto convert( public static BalancerBandwidthCommandProto convert(
@ -1961,5 +1984,74 @@ public class PBHelper {
assert size >= 0; assert size >= 0;
return new ExactSizeInputStream(input, size); return new ExactSizeInputStream(input, size);
} }
private static AclEntryScopeProto convert(AclEntryScope v) {
return AclEntryScopeProto.valueOf(v.ordinal());
}
private static AclEntryScope convert(AclEntryScopeProto v) {
return castEnum(v, ACL_ENTRY_SCOPE_VALUES);
}
private static AclEntryTypeProto convert(AclEntryType e) {
return AclEntryTypeProto.valueOf(e.ordinal());
}
private static AclEntryType convert(AclEntryTypeProto v) {
return castEnum(v, ACL_ENTRY_TYPE_VALUES);
}
private static FsActionProto convert(FsAction v) {
return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
}
private static FsAction convert(FsActionProto v) {
return castEnum(v, FSACTION_VALUES);
}
public static List<AclEntryProto> convertAclEntryProto(
List<AclEntry> aclSpec) {
ArrayList<AclEntryProto> r = Lists.newArrayListWithCapacity(aclSpec.size());
for (AclEntry e : aclSpec) {
AclEntryProto.Builder builder = AclEntryProto.newBuilder();
builder.setType(convert(e.getType()));
builder.setScope(convert(e.getScope()));
builder.setPermissions(convert(e.getPermission()));
if (e.getName() != null) {
builder.setName(e.getName());
}
r.add(builder.build());
}
return r;
}
public static List<AclEntry> convertAclEntry(List<AclEntryProto> aclSpec) {
ArrayList<AclEntry> r = Lists.newArrayListWithCapacity(aclSpec.size());
for (AclEntryProto e : aclSpec) {
AclEntry.Builder builder = new AclEntry.Builder();
builder.setType(convert(e.getType()));
builder.setScope(convert(e.getScope()));
builder.setPermission(convert(e.getPermissions()));
if (e.hasName()) {
builder.setName(e.getName());
}
r.add(builder.build());
}
return r;
}
public static AclStatus convert(GetAclStatusResponseProto e) {
AclStatusProto r = e.getResult();
return new AclStatus.Builder().owner(r.getOwner()).group(r.getGroup())
.stickyBit(r.getSticky())
.addEntries(convertAclEntry(r.getEntriesList())).build();
}
public static GetAclStatusResponseProto convert(AclStatus e) {
AclStatusProto r = AclStatusProto.newBuilder().setOwner(e.getOwner())
.setGroup(e.getGroup()).setSticky(e.isStickyBit())
.addAllEntries(convertAclEntryProto(e.getEntries())).build();
return GetAclStatusResponseProto.newBuilder().setResult(r).build();
}
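// A minimal round-trip sketch (illustrative only, not part of this change): an
// AclEntry converted to its protobuf form and back should compare equal, which
// is what the wire protocol relies on. For example:
//
//   AclEntry e = new AclEntry.Builder()
//       .setScope(AclEntryScope.ACCESS)
//       .setType(AclEntryType.USER)
//       .setName("bruce")
//       .setPermission(FsAction.READ_WRITE)
//       .build();
//   List<AclEntry> back =
//       convertAclEntry(convertAclEntryProto(Lists.newArrayList(e)));
//   assert e.equals(back.get(0));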
} }

@@ -0,0 +1,56 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.AclException;
/**
* Support for ACLs is controlled by a configuration flag. If the configuration
* flag is false, then the NameNode will reject all ACL-related operations.
*/
final class AclConfigFlag {
private final boolean enabled;
/**
* Creates a new AclConfigFlag from configuration.
*
* @param conf Configuration to check
*/
public AclConfigFlag(Configuration conf) {
enabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,
DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT);
LogFactory.getLog(AclConfigFlag.class).info("ACLs enabled? " + enabled);
}
/**
* Checks the flag on behalf of an ACL API call.
*
* @throws AclException if ACLs are disabled
*/
public void checkForApiCall() throws AclException {
if (!enabled) {
throw new AclException(String.format(
"The ACL operation has been rejected. "
+ "Support for ACLs has been disabled by setting %s to false.",
DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY));
}
}
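// Usage sketch (assumed caller, not part of this file): the NameNode would
// construct one flag from its configuration and consult it at the start of
// each ACL RPC, e.g.:
//
//   Configuration conf = new Configuration();
//   conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
//   AclConfigFlag aclConfigFlag = new AclConfigFlag(conf);
//   aclConfigFlag.checkForApiCall();  // throws AclException when ACLs are disabled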
}

@@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
/**
* Feature that represents the ACLs of the inode.
*/
@InterfaceAudience.Private
public class AclFeature implements INode.Feature {
public static final List<AclEntry> EMPTY_ENTRY_LIST = Collections.emptyList();
private final List<AclEntry> entries;
public AclFeature(List<AclEntry> entries) {
this.entries = entries;
}
public List<AclEntry> getEntries() {
return entries;
}
}

@@ -0,0 +1,406 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Collections;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
/**
* AclStorage contains utility methods that define how ACL data is stored in the
* namespace.
*
* If an inode has an ACL, then the ACL bit is set in the inode's
* {@link FsPermission} and the inode also contains an {@link AclFeature}. For
* the access ACL, the owner and other entries are identical to the owner and
* other bits stored in FsPermission, so we reuse those. The access mask entry
* is stored into the group permission bits of FsPermission. This is consistent
* with other file systems' implementations of ACLs and eliminates the need for
* special handling in various parts of the codebase. For example, if a user
* calls chmod to change group permission bits on a file with an ACL, then the
* expected behavior is to change the ACL's mask entry. By saving the mask entry
* into the group permission bits, chmod continues to work correctly without
* special handling. All remaining access entries (the unnamed group entry plus
* any named user and named group entries) are stored as explicit
* {@link AclEntry} instances in a list inside the AclFeature. Additionally,
* all default entries are stored in the AclFeature.
*
* The methods in this class encapsulate these rules for reading or writing the
* ACL entries to the appropriate location.
*
* The methods in this class assume that input ACL entry lists have already been
* validated and sorted according to the rules enforced by
* {@link AclTransformation}.
*/
@InterfaceAudience.Private
final class AclStorage {
/**
* If a default ACL is defined on a parent directory, then copies that default
* ACL to a newly created child file or directory.
*
* @param child INode newly created child
*/
public static void copyINodeDefaultAcl(INode child) {
INodeDirectory parent = child.getParent();
AclFeature parentAclFeature = parent.getAclFeature();
if (parentAclFeature == null || !(child.isFile() || child.isDirectory())) {
return;
}
// Split parent's entries into access vs. default.
List<AclEntry> featureEntries = parent.getAclFeature().getEntries();
ScopedAclEntries scopedEntries = new ScopedAclEntries(featureEntries);
List<AclEntry> parentDefaultEntries = scopedEntries.getDefaultEntries();
// The parent may have an access ACL but no default ACL. If so, exit.
if (parentDefaultEntries.isEmpty()) {
return;
}
// Pre-allocate list size for access entries to copy from parent.
List<AclEntry> accessEntries = Lists.newArrayListWithCapacity(
parentDefaultEntries.size());
FsPermission childPerm = child.getFsPermission();
// Copy each default ACL entry from parent to new child's access ACL.
boolean parentDefaultIsMinimal = isMinimalAcl(parentDefaultEntries);
for (AclEntry entry: parentDefaultEntries) {
AclEntryType type = entry.getType();
String name = entry.getName();
AclEntry.Builder builder = new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(type)
.setName(name);
// The child's initial permission bits are treated as the mode parameter,
// which can filter copied permission values for owner, mask and other.
final FsAction permission;
if (type == AclEntryType.USER && name == null) {
permission = entry.getPermission().and(childPerm.getUserAction());
} else if (type == AclEntryType.GROUP && parentDefaultIsMinimal) {
// This only happens if the default ACL is a minimal ACL: exactly 3
// entries corresponding to owner, group and other. In this case,
// filter the group permissions.
permission = entry.getPermission().and(childPerm.getGroupAction());
} else if (type == AclEntryType.MASK) {
// Group bits from mode parameter filter permission of mask entry.
permission = entry.getPermission().and(childPerm.getGroupAction());
} else if (type == AclEntryType.OTHER) {
permission = entry.getPermission().and(childPerm.getOtherAction());
} else {
permission = entry.getPermission();
}
builder.setPermission(permission);
accessEntries.add(builder.build());
}
// A new directory also receives a copy of the parent's default ACL.
List<AclEntry> defaultEntries = child.isDirectory() ? parentDefaultEntries :
Collections.<AclEntry>emptyList();
final FsPermission newPerm;
if (!isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) {
// Save the new ACL to the child.
child.addAclFeature(createAclFeature(accessEntries, defaultEntries));
newPerm = createFsPermissionForExtendedAcl(accessEntries, childPerm);
} else {
// The child is receiving a minimal ACL.
newPerm = createFsPermissionForMinimalAcl(accessEntries, childPerm);
}
child.setPermission(newPerm);
}
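// Worked example (illustrative; entry syntax follows getfacl conventions):
// suppose the parent directory carries default:user::rwx, default:user:bruce:rwx,
// default:group::r-x, default:mask::r-x and default:other::---, and a child file
// is created with mode 644 (rw-r--r--). The child's access ACL then becomes
// user::rw-, user:bruce:rwx, group::r-x, mask::r--, other::---: the owner, mask
// and other copies are ANDed with the corresponding mode bits, while named
// entries and the unnamed group entry are copied verbatim. A child directory
// would additionally receive the parent's default entries unchanged.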
/**
* Reads the existing extended ACL entries of an inode. This method returns
* only the extended ACL entries stored in the AclFeature. If the inode does
* not have an ACL, then this method returns an empty list. This method
* supports querying by snapshot ID.
*
* @param inode INode to read
* @param snapshotId int ID of snapshot to read
* @return List<AclEntry> containing extended inode ACL entries
*/
public static List<AclEntry> readINodeAcl(INode inode, int snapshotId) {
AclFeature f = inode.getAclFeature(snapshotId);
return f == null ? ImmutableList.<AclEntry> of() : f.getEntries();
}
/**
* Reads the existing ACL of an inode. This method always returns the full
* logical ACL of the inode after reading relevant data from the inode's
* {@link FsPermission} and {@link AclFeature}. Note that every inode
* logically has an ACL, even if no ACL has been set explicitly. If the inode
* does not have an extended ACL, then the result is a minimal ACL consisting of
* exactly 3 entries that correspond to the owner, group and other permissions.
* This method always reads the inode's current state and does not support
* querying by snapshot ID. This is because the method is intended to support
* ACL modification APIs, which always apply a delta on top of current state.
*
* @param inode INode to read
* @return List<AclEntry> containing all logical inode ACL entries
*/
public static List<AclEntry> readINodeLogicalAcl(INode inode) {
FsPermission perm = inode.getFsPermission();
AclFeature f = inode.getAclFeature();
if (f == null) {
return getMinimalAcl(perm);
}
final List<AclEntry> existingAcl;
// Split ACL entries stored in the feature into access vs. default.
List<AclEntry> featureEntries = f.getEntries();
ScopedAclEntries scoped = new ScopedAclEntries(featureEntries);
List<AclEntry> accessEntries = scoped.getAccessEntries();
List<AclEntry> defaultEntries = scoped.getDefaultEntries();
// Pre-allocate list size for the explicit entries stored in the feature
// plus the 3 implicit entries (owner, group and other) from the permission
// bits.
existingAcl = Lists.newArrayListWithCapacity(featureEntries.size() + 3);
if (!accessEntries.isEmpty()) {
// Add owner entry implied from user permission bits.
existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setPermission(perm.getUserAction())
.build());
// Next add all named user and group entries taken from the feature.
existingAcl.addAll(accessEntries);
// Add mask entry implied from group permission bits.
existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.MASK).setPermission(perm.getGroupAction())
.build());
// Add other entry implied from other permission bits.
existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER).setPermission(perm.getOtherAction())
.build());
} else {
// It's possible that there is a default ACL but no access ACL. In this
// case, add the minimal access ACL implied by the permission bits.
existingAcl.addAll(getMinimalAcl(perm));
}
// Add all default entries after the access entries.
existingAcl.addAll(defaultEntries);
// The above adds entries in the correct order, so no need to sort here.
return existingAcl;
}
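// Worked example (illustrative): for an inode whose permission bits are
// rwxr-x--- with the ACL bit set and whose AclFeature stores the access entries
// user:diana:rwx and group::r-x, this method returns user::rwx, user:diana:rwx,
// group::r-x, mask::r-x, other::---. The owner, mask and other entries are
// synthesized from the user, group and other permission bits respectively; the
// named-user and unnamed-group entries come from the feature, and any default
// entries stored in the feature would be appended after them.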
/**
* Completely removes the ACL from an inode.
*
* @param inode INode to update
* @param snapshotId int latest snapshot ID of inode
* @throws QuotaExceededException if quota limit is exceeded
*/
public static void removeINodeAcl(INode inode, int snapshotId)
throws QuotaExceededException {
AclFeature f = inode.getAclFeature();
if (f == null) {
return;
}
FsPermission perm = inode.getFsPermission();
List<AclEntry> featureEntries = f.getEntries();
if (featureEntries.get(0).getScope() == AclEntryScope.ACCESS) {
// Restore group permissions from the feature's entry to permission
// bits, overwriting the mask, which is not part of a minimal ACL.
AclEntry groupEntryKey = new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS).setType(AclEntryType.GROUP).build();
int groupEntryIndex = Collections.binarySearch(featureEntries,
groupEntryKey, AclTransformation.ACL_ENTRY_COMPARATOR);
assert groupEntryIndex >= 0;
FsAction groupPerm = featureEntries.get(groupEntryIndex).getPermission();
FsPermission newPerm = new FsPermission(perm.getUserAction(), groupPerm,
perm.getOtherAction(), perm.getStickyBit());
inode.setPermission(newPerm, snapshotId);
}
inode.removeAclFeature(snapshotId);
}
/**
* Updates an inode with a new ACL. This method takes a full logical ACL and
* stores the entries to the inode's {@link FsPermission} and
* {@link AclFeature}.
*
* @param inode INode to update
* @param newAcl List<AclEntry> containing new ACL entries
* @param snapshotId int latest snapshot ID of inode
* @throws AclException if the ACL is invalid for the given inode
* @throws QuotaExceededException if quota limit is exceeded
*/
public static void updateINodeAcl(INode inode, List<AclEntry> newAcl,
int snapshotId) throws AclException, QuotaExceededException {
assert newAcl.size() >= 3;
FsPermission perm = inode.getFsPermission();
final FsPermission newPerm;
if (!isMinimalAcl(newAcl)) {
// This is an extended ACL. Split entries into access vs. default.
ScopedAclEntries scoped = new ScopedAclEntries(newAcl);
List<AclEntry> accessEntries = scoped.getAccessEntries();
List<AclEntry> defaultEntries = scoped.getDefaultEntries();
// Only directories may have a default ACL.
if (!defaultEntries.isEmpty() && !inode.isDirectory()) {
throw new AclException(
"Invalid ACL: only directories may have a default ACL.");
}
// Attach entries to the feature.
if (inode.getAclFeature() != null) {
inode.removeAclFeature(snapshotId);
}
inode.addAclFeature(createAclFeature(accessEntries, defaultEntries),
snapshotId);
newPerm = createFsPermissionForExtendedAcl(accessEntries, perm);
} else {
// This is a minimal ACL. Remove the ACL feature if it previously had one.
if (inode.getAclFeature() != null) {
inode.removeAclFeature(snapshotId);
}
newPerm = createFsPermissionForMinimalAcl(newAcl, perm);
}
inode.setPermission(newPerm, snapshotId);
}
/**
* There is no reason to instantiate this class.
*/
private AclStorage() {
}
/**
* Creates an AclFeature from the given ACL entries.
*
* @param accessEntries List<AclEntry> access ACL entries
* @param defaultEntries List<AclEntry> default ACL entries
* @return AclFeature containing the required ACL entries
*/
private static AclFeature createAclFeature(List<AclEntry> accessEntries,
List<AclEntry> defaultEntries) {
// Pre-allocate list size for the explicit entries stored in the feature,
// which is all entries minus the 3 entries implicitly stored in the
// permission bits.
List<AclEntry> featureEntries = Lists.newArrayListWithCapacity(
(accessEntries.size() - 3) + defaultEntries.size());
// For the access ACL, the feature only needs to hold the named user and
// group entries. For a correctly sorted ACL, these will be in a
// predictable range.
if (!isMinimalAcl(accessEntries)) {
featureEntries.addAll(
accessEntries.subList(1, accessEntries.size() - 2));
}
// Add all default entries to the feature.
featureEntries.addAll(defaultEntries);
return new AclFeature(Collections.unmodifiableList(featureEntries));
}
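// For example (illustrative), given the sorted access entries
// [user::rwx, user:bruce:rw-, group::r-x, group:sales:r-x, mask::rw-, other::r--],
// the feature keeps only [user:bruce:rw-, group::r-x, group:sales:r-x]; the
// owner, mask and other permissions live in the FsPermission bits instead.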
/**
* Creates the new FsPermission for an inode that is receiving an extended
* ACL, based on its access ACL entries. For a correctly sorted ACL, the
* first entry is the owner and the last 2 entries are the mask and other
* entries respectively. Also preserve sticky bit and toggle ACL bit on.
*
* @param accessEntries List<AclEntry> access ACL entries
* @param existingPerm FsPermission existing permissions
* @return FsPermission new permissions
*/
private static FsPermission createFsPermissionForExtendedAcl(
List<AclEntry> accessEntries, FsPermission existingPerm) {
return new FsPermission(accessEntries.get(0).getPermission(),
accessEntries.get(accessEntries.size() - 2).getPermission(),
accessEntries.get(accessEntries.size() - 1).getPermission(),
existingPerm.getStickyBit());
}
/**
* Creates the new FsPermission for an inode that is receiving a minimal ACL,
* based on its access ACL entries. For a correctly sorted ACL, the owner,
* group and other permissions are in order. Also preserve sticky bit and
* toggle ACL bit off.
*
* @param accessEntries List<AclEntry> access ACL entries
* @param existingPerm FsPermission existing permissions
* @return FsPermission new permissions
*/
private static FsPermission createFsPermissionForMinimalAcl(
List<AclEntry> accessEntries, FsPermission existingPerm) {
return new FsPermission(accessEntries.get(0).getPermission(),
accessEntries.get(1).getPermission(),
accessEntries.get(2).getPermission(),
existingPerm.getStickyBit());
}
/**
* Translates the given permission bits to the equivalent minimal ACL.
*
* @param perm FsPermission to translate
* @return List<AclEntry> containing exactly 3 entries representing the owner,
* group and other permissions
*/
private static List<AclEntry> getMinimalAcl(FsPermission perm) {
return Lists.newArrayList(
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build(),
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build(),
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
}
/**
* Checks if the given entries represent a minimal ACL (contains exactly 3
* entries).
*
* @param entries List<AclEntry> entries to check
* @return boolean true if the entries represent a minimal ACL
*/
private static boolean isMinimalAcl(List<AclEntry> entries) {
return entries.size() == 3;
}
}

@@ -0,0 +1,485 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import com.google.common.base.Objects;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AclException;
/**
* AclTransformation defines the operations that can modify an ACL. All ACL
* modifications take as input an existing ACL and apply logic to add new
* entries, modify existing entries or remove old entries. Some operations also
* accept an ACL spec: a list of entries that further describes the requested
* change. Different operations interpret the ACL spec differently. In the
* case of adding an ACL to an inode that previously did not have one, the
* existing ACL can be a "minimal ACL" containing exactly 3 entries for owner,
* group and other, all derived from the {@link FsPermission} bits.
*
* The algorithms implemented here require sorted lists of ACL entries. For any
* existing ACL, it is assumed that the entries are sorted. This is because all
* ACL creation and modification is intended to go through these methods, and
* they all guarantee correct sort order in their outputs. However, an ACL spec
* is considered untrusted user input, so all operations pre-sort the ACL spec as
* the first step.
*/
@InterfaceAudience.Private
final class AclTransformation {
private static final int MAX_ENTRIES = 32;
/**
* Filters (discards) any existing ACL entries that have the same scope, type
* and name of any entry in the ACL spec. If necessary, recalculates the mask
* entries. If necessary, default entries may be inferred by copying the
* permissions of the corresponding access entries. It is invalid to request
* removal of the mask entry from an ACL that would otherwise require a mask
* entry, due to existing named entries or an unnamed group entry.
*
* @param existingAcl List<AclEntry> existing ACL
* @param inAclSpec List<AclEntry> ACL spec describing entries to filter
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> filterAclEntriesByAclSpec(
List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry existingEntry: existingAcl) {
if (aclSpec.containsKey(existingEntry)) {
scopeDirty.add(existingEntry.getScope());
if (existingEntry.getType() == MASK) {
maskDirty.add(existingEntry.getScope());
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
/**
* Filters (discards) any existing default ACL entries. The new ACL retains
* only the access ACL entries.
*
* @param existingAcl List<AclEntry> existing ACL
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> filterDefaultAclEntries(
List<AclEntry> existingAcl) throws AclException {
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
for (AclEntry existingEntry: existingAcl) {
if (existingEntry.getScope() == DEFAULT) {
// Default entries sort after access entries, so we can exit early.
break;
}
aclBuilder.add(existingEntry);
}
return buildAndValidateAcl(aclBuilder);
}
/**
* Merges the entries of the ACL spec into the existing ACL. If necessary,
* recalculates the mask entries. If necessary, default entries may be
* inferred by copying the permissions of the corresponding access entries.
*
* @param existingAcl List<AclEntry> existing ACL
* @param inAclSpec List<AclEntry> ACL spec containing entries to merge
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
List<AclEntry> foundAclSpecEntries =
Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry existingEntry: existingAcl) {
AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
if (aclSpecEntry != null) {
foundAclSpecEntries.add(aclSpecEntry);
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
// ACL spec entries that were not replacements are new additions.
for (AclEntry newEntry: aclSpec) {
if (Collections.binarySearch(foundAclSpecEntries, newEntry,
ACL_ENTRY_COMPARATOR) < 0) {
scopeDirty.add(newEntry.getScope());
if (newEntry.getType() == MASK) {
providedMask.put(newEntry.getScope(), newEntry);
maskDirty.add(newEntry.getScope());
} else {
aclBuilder.add(newEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
/**
* Completely replaces the ACL with the entries of the ACL spec. If
* necessary, recalculates the mask entries. If necessary, default entries
* are inferred by copying the permissions of the corresponding access
* entries. Replacement occurs separately for each of the access ACL and the
* default ACL. If the ACL spec contains only access entries, then the
* existing default entries are retained. If the ACL spec contains only
* default entries, then the existing access entries are retained. If the ACL
* spec contains both access and default entries, then both are replaced.
*
* @param existingAcl List<AclEntry> existing ACL
* @param inAclSpec List<AclEntry> ACL spec containing replacement entries
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
// Replacement is done separately for each scope: access and default.
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry aclSpecEntry: aclSpec) {
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
}
// Copy existing entries if the scope was not replaced.
for (AclEntry existingEntry: existingAcl) {
if (!scopeDirty.contains(existingEntry.getScope())) {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
/**
* There is no reason to instantiate this class.
*/
private AclTransformation() {
}
/**
* Comparator that enforces required ordering for entries within an ACL:
* -owner entry (unnamed user)
* -all named user entries (internal ordering undefined)
* -owning group entry (unnamed group)
* -all named group entries (internal ordering undefined)
* -mask entry
* -other entry
* All access ACL entries sort ahead of all default ACL entries.
*/
static final Comparator<AclEntry> ACL_ENTRY_COMPARATOR =
new Comparator<AclEntry>() {
@Override
public int compare(AclEntry entry1, AclEntry entry2) {
return ComparisonChain.start()
.compare(entry1.getScope(), entry2.getScope(),
Ordering.explicit(ACCESS, DEFAULT))
.compare(entry1.getType(), entry2.getType(),
Ordering.explicit(USER, GROUP, MASK, OTHER))
.compare(entry1.getName(), entry2.getName(),
Ordering.natural().nullsFirst())
.result();
}
};
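// Ordering sketch (illustrative): given the unsorted spec
// [default:user:bruce:rwx, mask::r-x, user::rw-, group:sales:r-x], sorting with
// ACL_ENTRY_COMPARATOR yields
// [user::rw-, group:sales:r-x, mask::r-x, default:user:bruce:rwx], i.e. access
// entries before default entries, and within a scope the owner, named users,
// owning group, named groups, mask and other in that order.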
/**
* Builds the final list of ACL entries to return by trimming, sorting and
* validating the ACL entries that have been added.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
* @return List<AclEntry> unmodifiable, sorted list of ACL entries
* @throws AclException if validation fails
*/
private static List<AclEntry> buildAndValidateAcl(
ArrayList<AclEntry> aclBuilder) throws AclException {
if (aclBuilder.size() > MAX_ENTRIES) {
throw new AclException("Invalid ACL: ACL has " + aclBuilder.size() +
" entries, which exceeds maximum of " + MAX_ENTRIES + ".");
}
aclBuilder.trimToSize();
Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
// Full iteration to check for duplicates and invalid named entries.
AclEntry prevEntry = null;
for (AclEntry entry: aclBuilder) {
if (prevEntry != null &&
ACL_ENTRY_COMPARATOR.compare(prevEntry, entry) == 0) {
throw new AclException(
"Invalid ACL: multiple entries with same scope, type and name.");
}
if (entry.getName() != null && (entry.getType() == MASK ||
entry.getType() == OTHER)) {
throw new AclException(
"Invalid ACL: this entry type must not have a name: " + entry + ".");
}
prevEntry = entry;
}
// Search for the required base access entries. If there is a default ACL,
// then do the same check on the default entries.
ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) {
AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS)
.setType(type).build();
if (Collections.binarySearch(scopedEntries.getAccessEntries(),
accessEntryKey, ACL_ENTRY_COMPARATOR) < 0) {
throw new AclException(
"Invalid ACL: the user, group and other entries are required.");
}
if (!scopedEntries.getDefaultEntries().isEmpty()) {
AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT)
.setType(type).build();
if (Collections.binarySearch(scopedEntries.getDefaultEntries(),
defaultEntryKey, ACL_ENTRY_COMPARATOR) < 0) {
throw new AclException(
"Invalid default ACL: the user, group and other entries are required.");
}
}
}
return Collections.unmodifiableList(aclBuilder);
}
/**
* Calculates mask entries required for the ACL. Mask calculation is performed
* separately for each scope: access and default. This method is responsible
* for handling the following cases of mask calculation:
* 1. Throws an exception if the caller attempts to remove the mask entry of an
* existing ACL that requires it. If the ACL has any named entries, then a
* mask entry is required.
* 2. If the caller supplied a mask in the ACL spec, use it.
* 3. If the caller did not supply a mask, but there are ACL entry changes in
* this scope, then automatically calculate a new mask. The permissions of
* the new mask are the union of the permissions on the group entry and all
* named entries.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
* @param providedMask EnumMap<AclEntryScope, AclEntry> mapping each scope to
* the mask entry that was provided for that scope (if provided)
* @param maskDirty EnumSet<AclEntryScope> which contains a scope if the mask
* entry is dirty (added or deleted) in that scope
* @param scopeDirty EnumSet<AclEntryScope> which contains a scope if any entry
* is dirty (added or deleted) in that scope
* @throws AclException if validation fails
*/
private static void calculateMasks(List<AclEntry> aclBuilder,
EnumMap<AclEntryScope, AclEntry> providedMask,
EnumSet<AclEntryScope> maskDirty, EnumSet<AclEntryScope> scopeDirty)
throws AclException {
EnumSet<AclEntryScope> scopeFound = EnumSet.noneOf(AclEntryScope.class);
EnumMap<AclEntryScope, FsAction> unionPerms =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskNeeded = EnumSet.noneOf(AclEntryScope.class);
// Determine which scopes are present, which scopes need a mask, and the
// union of group class permissions in each scope.
for (AclEntry entry: aclBuilder) {
scopeFound.add(entry.getScope());
if (entry.getType() == GROUP || entry.getName() != null) {
FsAction scopeUnionPerms = Objects.firstNonNull(
unionPerms.get(entry.getScope()), FsAction.NONE);
unionPerms.put(entry.getScope(),
scopeUnionPerms.or(entry.getPermission()));
}
if (entry.getName() != null) {
maskNeeded.add(entry.getScope());
}
}
// Add mask entry if needed in each scope.
for (AclEntryScope scope: scopeFound) {
if (!providedMask.containsKey(scope) && maskNeeded.contains(scope) &&
maskDirty.contains(scope)) {
// Caller explicitly removed mask entry, but it's required.
throw new AclException(
"Invalid ACL: mask is required, but it was deleted.");
} else if (providedMask.containsKey(scope) &&
(!scopeDirty.contains(scope) || maskDirty.contains(scope))) {
// Caller explicitly provided new mask, or we are preserving the existing
// mask in an unchanged scope.
aclBuilder.add(providedMask.get(scope));
} else if (maskNeeded.contains(scope) || providedMask.containsKey(scope)) {
// Otherwise, if there are maskable entries present, or the ACL
// previously had a mask, then recalculate a mask automatically.
aclBuilder.add(new AclEntry.Builder()
.setScope(scope)
.setType(MASK)
.setPermission(unionPerms.get(scope))
.build());
}
}
}
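// Example of automatic mask recalculation (illustrative): if the resulting
// access entries include group::r-x and user:bruce:rw-, no mask was provided in
// the spec, and the access scope changed, then the generated mask entry is
// mask::rwx, the union of the group-class permissions (r-x | rw-).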
/**
* Adds unspecified default entries by copying permissions from the
* corresponding access entries.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
*/
private static void copyDefaultsIfNeeded(List<AclEntry> aclBuilder) {
Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
if (!scopedEntries.getDefaultEntries().isEmpty()) {
List<AclEntry> accessEntries = scopedEntries.getAccessEntries();
List<AclEntry> defaultEntries = scopedEntries.getDefaultEntries();
List<AclEntry> copiedEntries = Lists.newArrayListWithCapacity(3);
for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) {
AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT)
.setType(type).build();
int defaultEntryIndex = Collections.binarySearch(defaultEntries,
defaultEntryKey, ACL_ENTRY_COMPARATOR);
if (defaultEntryIndex < 0) {
AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS)
.setType(type).build();
int accessEntryIndex = Collections.binarySearch(accessEntries,
accessEntryKey, ACL_ENTRY_COMPARATOR);
if (accessEntryIndex >= 0) {
copiedEntries.add(new AclEntry.Builder()
.setScope(DEFAULT)
.setType(type)
.setPermission(accessEntries.get(accessEntryIndex).getPermission())
.build());
}
}
}
// Add all copied entries when done to prevent potential issues with binary
// search on a modified aclBuilder during the main loop.
aclBuilder.addAll(copiedEntries);
}
}
/**
* An ACL spec that has been pre-validated and sorted.
*/
private static final class ValidatedAclSpec implements Iterable<AclEntry> {
private final List<AclEntry> aclSpec;
/**
* Creates a ValidatedAclSpec by pre-validating and sorting the given ACL
* entries. Pre-validation checks that the spec does not exceed the maximum
* number of entries. This check is performed before modifying the ACL, and by
* itself it is insufficient for enforcing the maximum number of entries.
* Transformation logic can create additional entries automatically, such as
* the mask and some of the default entries, so we also need additional
* checks during transformation. The up-front check is still valuable here
* so that we don't run a lot of expensive transformation logic while
* holding the namesystem lock on behalf of an attacker who intentionally
* sent a huge ACL spec.
*
* @param aclSpec List<AclEntry> containing unvalidated input ACL spec
* @throws AclException if validation fails
*/
public ValidatedAclSpec(List<AclEntry> aclSpec) throws AclException {
if (aclSpec.size() > MAX_ENTRIES) {
throw new AclException("Invalid ACL: ACL spec has " + aclSpec.size() +
" entries, which exceeds maximum of " + MAX_ENTRIES + ".");
}
Collections.sort(aclSpec, ACL_ENTRY_COMPARATOR);
this.aclSpec = aclSpec;
}
/**
* Returns true if this contains an entry matching the given key. An ACL
* entry's key consists of scope, type and name (but not permission).
*
* @param key AclEntry search key
* @return boolean true if found
*/
public boolean containsKey(AclEntry key) {
return Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR) >= 0;
}
/**
* Returns the entry matching the given key or null if not found. An ACL
* entry's key consists of scope, type and name (but not permission).
*
* @param key AclEntry search key
* @return AclEntry entry matching the given key or null if not found
*/
public AclEntry findByKey(AclEntry key) {
int index = Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR);
if (index >= 0) {
return aclSpec.get(index);
}
return null;
}
@Override
public Iterator<AclEntry> iterator() {
return aclSpec.iterator();
}
}
}

@@ -39,12 +39,15 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -283,7 +286,7 @@ public class FSDirectory implements Closeable {
short replication, long preferredBlockSize, String clientName, short replication, long preferredBlockSize, String clientName,
String clientMachine, DatanodeDescriptor clientNode) String clientMachine, DatanodeDescriptor clientNode)
throws FileAlreadyExistsException, QuotaExceededException, throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException { UnresolvedLinkException, SnapshotAccessControlException, AclException {
waitForReady(); waitForReady();
// Always do an implicit mkdirs for parent directory tree. // Always do an implicit mkdirs for parent directory tree.
@@ -325,6 +328,7 @@ public class FSDirectory implements Closeable {
INodeFile unprotectedAddFile( long id, INodeFile unprotectedAddFile( long id,
String path, String path,
PermissionStatus permissions, PermissionStatus permissions,
List<AclEntry> aclEntries,
short replication, short replication,
long modificationTime, long modificationTime,
long atime, long atime,
@@ -347,6 +351,10 @@ public class FSDirectory implements Closeable {
try { try {
if (addINode(path, newNode)) { if (addINode(path, newNode)) {
if (aclEntries != null) {
AclStorage.updateINodeAcl(newNode, aclEntries,
Snapshot.CURRENT_STATE_ID);
}
return newNode; return newNode;
} }
} catch (IOException e) { } catch (IOException e) {
@@ -1168,7 +1176,8 @@ public class FSDirectory implements Closeable {
if (inode == null) { if (inode == null) {
throw new FileNotFoundException("File does not exist: " + src); throw new FileNotFoundException("File does not exist: " + src);
} }
inode.setPermission(permissions, inodesInPath.getLatestSnapshotId()); int snapshotId = inodesInPath.getLatestSnapshotId();
inode.setPermission(permissions, snapshotId);
} }
void setOwner(String src, String username, String groupname) void setOwner(String src, String username, String groupname)
@@ -1616,6 +1625,14 @@ public class FSDirectory implements Closeable {
*/ */
private HdfsFileStatus getFileInfo4DotSnapshot(String src) private HdfsFileStatus getFileInfo4DotSnapshot(String src)
throws UnresolvedLinkException { throws UnresolvedLinkException {
if (getINode4DotSnapshot(src) != null) {
return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0);
}
return null;
}
private INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
Preconditions.checkArgument( Preconditions.checkArgument(
src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
"%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR); "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
@@ -1627,8 +1644,7 @@ public class FSDirectory implements Closeable {
if (node != null if (node != null
&& node.isDirectory() && node.isDirectory()
&& node.asDirectory() instanceof INodeDirectorySnapshottable) { && node.asDirectory() instanceof INodeDirectorySnapshottable) {
return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, return node;
HdfsFileStatus.EMPTY_NAME, -1L, 0);
} }
return null; return null;
} }
@@ -1907,7 +1923,8 @@ public class FSDirectory implements Closeable {
boolean mkdirs(String src, PermissionStatus permissions, boolean mkdirs(String src, PermissionStatus permissions,
boolean inheritPermission, long now) boolean inheritPermission, long now)
throws FileAlreadyExistsException, QuotaExceededException, throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException { UnresolvedLinkException, SnapshotAccessControlException,
AclException {
src = normalizePath(src); src = normalizePath(src);
String[] names = INode.getPathNames(src); String[] names = INode.getPathNames(src);
byte[][] components = INode.getPathComponents(names); byte[][] components = INode.getPathComponents(names);
@@ -1970,7 +1987,7 @@ public class FSDirectory implements Closeable {
pathbuilder.append(Path.SEPARATOR + names[i]); pathbuilder.append(Path.SEPARATOR + names[i]);
unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i, unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i,
components[i], (i < lastInodeIndex) ? parentPermissions components[i], (i < lastInodeIndex) ? parentPermissions
: permissions, now); : permissions, null, now);
if (inodes[i] == null) { if (inodes[i] == null) {
return false; return false;
} }
@@ -1993,14 +2010,14 @@ public class FSDirectory implements Closeable {
} }
INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions, INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
long timestamp) throws QuotaExceededException, List<AclEntry> aclEntries, long timestamp)
UnresolvedLinkException { throws QuotaExceededException, UnresolvedLinkException, AclException {
assert hasWriteLock(); assert hasWriteLock();
byte[][] components = INode.getPathComponents(src); byte[][] components = INode.getPathComponents(src);
INodesInPath iip = getExistingPathINodes(components); INodesInPath iip = getExistingPathINodes(components);
INode[] inodes = iip.getINodes(); INode[] inodes = iip.getINodes();
final int pos = inodes.length - 1; final int pos = inodes.length - 1;
unprotectedMkdir(inodeId, iip, pos, components[pos], permissions, unprotectedMkdir(inodeId, iip, pos, components[pos], permissions, aclEntries,
timestamp); timestamp);
return inodes[pos]; return inodes[pos];
} }
@@ -2010,12 +2027,16 @@ public class FSDirectory implements Closeable {
* All ancestors exist. Newly created one stored at index pos. * All ancestors exist. Newly created one stored at index pos.
*/ */
private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath, private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
int pos, byte[] name, PermissionStatus permission, long timestamp) int pos, byte[] name, PermissionStatus permission,
throws QuotaExceededException { List<AclEntry> aclEntries, long timestamp)
throws QuotaExceededException, AclException {
assert hasWriteLock(); assert hasWriteLock();
final INodeDirectory dir = new INodeDirectory(inodeId, name, permission, final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
timestamp); timestamp);
if (addChild(inodesInPath, pos, dir, true)) { if (addChild(inodesInPath, pos, dir, true)) {
if (aclEntries != null) {
AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
}
inodesInPath.setINode(pos, dir); inodesInPath.setINode(pos, dir);
} }
} }
@@ -2240,6 +2261,7 @@ public class FSDirectory implements Closeable {
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
} else { } else {
iip.setINode(pos - 1, child.getParent()); iip.setINode(pos - 1, child.getParent());
AclStorage.copyINodeDefaultAcl(child);
addToInodeMap(child); addToInodeMap(child);
} }
return added; return added;
@@ -2625,7 +2647,7 @@ public class FSDirectory implements Closeable {
INodeSymlink addSymlink(String path, String target, INodeSymlink addSymlink(String path, String target,
PermissionStatus dirPerms, boolean createParent, boolean logRetryCache) PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
throws UnresolvedLinkException, FileAlreadyExistsException, throws UnresolvedLinkException, FileAlreadyExistsException,
QuotaExceededException, SnapshotAccessControlException { QuotaExceededException, SnapshotAccessControlException, AclException {
waitForReady(); waitForReady();
final long modTime = now(); final long modTime = now();
@@ -2669,7 +2691,154 @@ public class FSDirectory implements Closeable {
target); target);
return addINode(path, symlink) ? symlink : null; return addINode(path, symlink) ? symlink : null;
} }
void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
writeLock();
try {
List<AclEntry> newAcl = unprotectedModifyAclEntries(src, aclSpec);
fsImage.getEditLog().logSetAcl(src, newAcl);
} finally {
writeUnlock();
}
}
private List<AclEntry> unprotectedModifyAclEntries(String src,
List<AclEntry> aclSpec) throws IOException {
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.mergeAclEntries(existingAcl,
aclSpec);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
writeLock();
try {
List<AclEntry> newAcl = unprotectedRemoveAclEntries(src, aclSpec);
fsImage.getEditLog().logSetAcl(src, newAcl);
} finally {
writeUnlock();
}
}
private List<AclEntry> unprotectedRemoveAclEntries(String src,
List<AclEntry> aclSpec) throws IOException {
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.filterAclEntriesByAclSpec(
existingAcl, aclSpec);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
void removeDefaultAcl(String src) throws IOException {
writeLock();
try {
List<AclEntry> newAcl = unprotectedRemoveDefaultAcl(src);
fsImage.getEditLog().logSetAcl(src, newAcl);
} finally {
writeUnlock();
}
}
private List<AclEntry> unprotectedRemoveDefaultAcl(String src)
throws IOException {
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.filterDefaultAclEntries(
existingAcl);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
void removeAcl(String src) throws IOException {
writeLock();
try {
unprotectedRemoveAcl(src);
fsImage.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
} finally {
writeUnlock();
}
}
private void unprotectedRemoveAcl(String src) throws IOException {
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
AclStorage.removeINodeAcl(inode, snapshotId);
}
void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
writeLock();
try {
List<AclEntry> newAcl = unprotectedSetAcl(src, aclSpec);
fsImage.getEditLog().logSetAcl(src, newAcl);
} finally {
writeUnlock();
}
}
List<AclEntry> unprotectedSetAcl(String src, List<AclEntry> aclSpec)
throws IOException {
// ACL removal is logged to edits as OP_SET_ACL with an empty list.
if (aclSpec.isEmpty()) {
unprotectedRemoveAcl(src);
return AclFeature.EMPTY_ENTRY_LIST;
}
assert hasWriteLock();
INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.replaceAclEntries(existingAcl,
aclSpec);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
AclStatus getAclStatus(String src) throws IOException {
String srcs = normalizePath(src);
readLock();
try {
// There is no real inode for the path ending in ".snapshot", so return a
// non-null, unpopulated AclStatus. This is similar to getFileInfo.
if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR) &&
getINode4DotSnapshot(srcs) != null) {
return new AclStatus.Builder().owner("").group("").build();
}
INodesInPath iip = rootDir.getLastINodeInPath(srcs, true);
INode inode = resolveLastINode(src, iip);
int snapshotId = iip.getPathSnapshotId();
List<AclEntry> acl = AclStorage.readINodeAcl(inode, snapshotId);
return new AclStatus.Builder()
.owner(inode.getUserName()).group(inode.getGroupName())
.stickyBit(inode.getFsPermission(snapshotId).getStickyBit())
.addEntries(acl).build();
} finally {
readUnlock();
}
}
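// Illustrative note (the exact rendering belongs to the FsShell getfacl
// command, not to this class): the AclStatus built here carries the owner,
// group, sticky bit and the extended entries stored in the AclFeature; a
// getfacl-style listing would combine it with the inode's permission bits to
// show the full ACL, e.g. "# owner: bruce" and "# group: sales" header lines
// followed by one line per entry.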
private static INode resolveLastINode(String src, INodesInPath iip)
throws FileNotFoundException {
INode inode = iip.getLastINode();
if (inode == null)
throw new FileNotFoundException("cannot find " + src);
return inode;
}
/** /**
* Caches frequently used file names to reuse file name objects and * Caches frequently used file names to reuse file name objects and
* reduce heap size. * reduce heap size.

@@ -34,7 +34,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@@ -71,6 +73,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
@@ -684,6 +687,7 @@ public class FSEditLog implements LogsPurgeable {
*/ */
public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) { public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) {
Preconditions.checkArgument(newNode.isUnderConstruction()); Preconditions.checkArgument(newNode.isUnderConstruction());
PermissionStatus permissions = newNode.getPermissionStatus();
AddOp op = AddOp.getInstance(cache.get()) AddOp op = AddOp.getInstance(cache.get())
.setInodeId(newNode.getId()) .setInodeId(newNode.getId())
.setPath(path) .setPath(path)
@@ -692,9 +696,14 @@ public class FSEditLog implements LogsPurgeable {
.setAccessTime(newNode.getAccessTime()) .setAccessTime(newNode.getAccessTime())
.setBlockSize(newNode.getPreferredBlockSize()) .setBlockSize(newNode.getPreferredBlockSize())
.setBlocks(newNode.getBlocks()) .setBlocks(newNode.getBlocks())
.setPermissionStatus(newNode.getPermissionStatus()) .setPermissionStatus(permissions)
.setClientName(newNode.getFileUnderConstructionFeature().getClientName()) .setClientName(newNode.getFileUnderConstructionFeature().getClientName())
.setClientMachine(newNode.getFileUnderConstructionFeature().getClientMachine()); .setClientMachine(newNode.getFileUnderConstructionFeature().getClientMachine());
AclFeature f = newNode.getAclFeature();
if (f != null) {
op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
}
logRpcIds(op, toLogRpcIds); logRpcIds(op, toLogRpcIds);
logEdit(op); logEdit(op);
} }
@ -739,11 +748,17 @@ public class FSEditLog implements LogsPurgeable {
* Add create directory record to edit log * Add create directory record to edit log
*/ */
public void logMkDir(String path, INode newNode) { public void logMkDir(String path, INode newNode) {
PermissionStatus permissions = newNode.getPermissionStatus();
MkdirOp op = MkdirOp.getInstance(cache.get()) MkdirOp op = MkdirOp.getInstance(cache.get())
.setInodeId(newNode.getId()) .setInodeId(newNode.getId())
.setPath(path) .setPath(path)
.setTimestamp(newNode.getModificationTime()) .setTimestamp(newNode.getModificationTime())
.setPermissionStatus(newNode.getPermissionStatus()); .setPermissionStatus(permissions);
AclFeature f = newNode.getAclFeature();
if (f != null) {
op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
}
logEdit(op); logEdit(op);
} }
@ -1029,6 +1044,13 @@ public class FSEditLog implements LogsPurgeable {
logEdit(op); logEdit(op);
} }
void logSetAcl(String src, List<AclEntry> entries) {
SetAclOp op = SetAclOp.getInstance();
op.src = src;
op.aclEntries = entries;
logEdit(op);
}
/** /**
* Get all the journals this edit log is currently operating on. * Get all the journals this edit log is currently operating on.
*/ */


@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp;
@ -350,9 +351,10 @@ public class FSEditLogLoader {
inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
lastInodeId); lastInodeId);
newFile = fsDir.unprotectedAddFile(inodeId, newFile = fsDir.unprotectedAddFile(inodeId,
path, addCloseOp.permissions, replication, path, addCloseOp.permissions, addCloseOp.aclEntries,
addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true, replication, addCloseOp.mtime, addCloseOp.atime,
addCloseOp.clientName, addCloseOp.clientMachine); addCloseOp.blockSize, true, addCloseOp.clientName,
addCloseOp.clientMachine);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, path); fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
// add the op into retry cache if necessary // add the op into retry cache if necessary
@ -512,7 +514,7 @@ public class FSEditLogLoader {
lastInodeId); lastInodeId);
fsDir.unprotectedMkdir(inodeId, fsDir.unprotectedMkdir(inodeId,
renameReservedPathsOnUpgrade(mkdirOp.path, logVersion), renameReservedPathsOnUpgrade(mkdirOp.path, logVersion),
mkdirOp.permissions, mkdirOp.timestamp); mkdirOp.permissions, mkdirOp.aclEntries, mkdirOp.timestamp);
break; break;
} }
case OP_SET_GENSTAMP_V1: { case OP_SET_GENSTAMP_V1: {
@ -808,6 +810,11 @@ public class FSEditLogLoader {
} }
break; break;
} }
case OP_SET_ACL: {
SetAclOp setAclOp = (SetAclOp) op;
fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries);
break;
}
default: default:
throw new IOException("Invalid operation read " + op.opCode); throw new IOException("Invalid operation read " + op.opCode);
} }


@ -46,6 +46,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ROLLING_UPGRADE_FINALIZE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ROLLING_UPGRADE_FINALIZE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ROLLING_UPGRADE_START; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ROLLING_UPGRADE_START;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_ACL;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V1; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V1;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V2; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V2;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_NS_QUOTA; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_NS_QUOTA;
@ -78,6 +79,10 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -88,6 +93,8 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
@ -110,6 +117,8 @@ import org.xml.sax.helpers.AttributesImpl;
import com.google.common.base.Joiner; import com.google.common.base.Joiner;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
/** /**
* Helper classes for reading the ops from an InputStream. * Helper classes for reading the ops from an InputStream.
@ -170,6 +179,7 @@ public abstract class FSEditLogOp {
inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp()); inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp());
inst.put(OP_ADD_BLOCK, new AddBlockOp()); inst.put(OP_ADD_BLOCK, new AddBlockOp());
inst.put(OP_SET_ACL, new SetAclOp());
inst.put(OP_ROLLING_UPGRADE_START, new RollingUpgradeOp( inst.put(OP_ROLLING_UPGRADE_START, new RollingUpgradeOp(
OP_ROLLING_UPGRADE_START, "start")); OP_ROLLING_UPGRADE_START, "start"));
inst.put(OP_ROLLING_UPGRADE_FINALIZE, new RollingUpgradeOp( inst.put(OP_ROLLING_UPGRADE_FINALIZE, new RollingUpgradeOp(
@ -181,6 +191,16 @@ public abstract class FSEditLogOp {
} }
} }
private static ImmutableMap<String, FsAction> fsActionMap() {
ImmutableMap.Builder<String, FsAction> b = ImmutableMap.builder();
for (FsAction v : FsAction.values())
b.put(v.SYMBOL, v);
return b.build();
}
private static final ImmutableMap<String, FsAction> FSACTION_SYMBOL_MAP
= fsActionMap();
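
A tiny standalone illustration of the symbol lookup built above: FsAction exposes its rwx string as the public SYMBOL field, so FSACTION_SYMBOL_MAP resolves strings such as "rw-" back to the enum constant. This is a sketch for orientation only.

import org.apache.hadoop.fs.permission.FsAction;

public class FsActionSymbolDemo {
  public static void main(String[] args) {
    // Prints pairs like "rw- -> READ_WRITE"; the XML reader below performs
    // the reverse lookup through FSACTION_SYMBOL_MAP.
    for (FsAction action : FsAction.values()) {
      System.out.println(action.SYMBOL + " -> " + action.name());
    }
  }
}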
/** /**
* Constructor for an EditLog Op. EditLog ops cannot be constructed * Constructor for an EditLog Op. EditLog ops cannot be constructed
* directly, but only through Reader#readOp. * directly, but only through Reader#readOp.
@ -282,7 +302,76 @@ public abstract class FSEditLogOp {
XMLUtils.addSaxString(contentHandler, "RPC_CALLID", XMLUtils.addSaxString(contentHandler, "RPC_CALLID",
Integer.valueOf(callId).toString()); Integer.valueOf(callId).toString());
} }
private static final class AclEditLogUtil {
private static final int ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET = 6;
private static final int ACL_EDITLOG_ENTRY_TYPE_OFFSET = 3;
private static final int ACL_EDITLOG_ENTRY_SCOPE_OFFSET = 5;
private static final int ACL_EDITLOG_PERM_MASK = 7;
private static final int ACL_EDITLOG_ENTRY_TYPE_MASK = 3;
private static final int ACL_EDITLOG_ENTRY_SCOPE_MASK = 1;
private static final FsAction[] FSACTION_VALUES = FsAction.values();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
.values();
private static List<AclEntry> read(DataInputStream in, int logVersion)
throws IOException {
if (!LayoutVersion.supports(Feature.EXTENDED_ACL, logVersion)) {
return null;
}
int size = in.readInt();
if (size == 0) {
return null;
}
List<AclEntry> aclEntries = Lists.newArrayListWithCapacity(size);
for (int i = 0; i < size; ++i) {
int v = in.read();
int p = v & ACL_EDITLOG_PERM_MASK;
int t = (v >> ACL_EDITLOG_ENTRY_TYPE_OFFSET)
& ACL_EDITLOG_ENTRY_TYPE_MASK;
int s = (v >> ACL_EDITLOG_ENTRY_SCOPE_OFFSET)
& ACL_EDITLOG_ENTRY_SCOPE_MASK;
boolean hasName = ((v >> ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET) & 1) == 1;
String name = hasName ? FSImageSerialization.readString(in) : null;
aclEntries.add(new AclEntry.Builder().setName(name)
.setPermission(FSACTION_VALUES[p])
.setScope(ACL_ENTRY_SCOPE_VALUES[s])
.setType(ACL_ENTRY_TYPE_VALUES[t]).build());
}
return aclEntries;
}
private static void write(List<AclEntry> aclEntries, DataOutputStream out)
throws IOException {
if (aclEntries == null) {
out.writeInt(0);
return;
}
out.writeInt(aclEntries.size());
for (AclEntry e : aclEntries) {
boolean hasName = e.getName() != null;
int v = (e.getScope().ordinal() << ACL_EDITLOG_ENTRY_SCOPE_OFFSET)
| (e.getType().ordinal() << ACL_EDITLOG_ENTRY_TYPE_OFFSET)
| e.getPermission().ordinal();
if (hasName) {
v |= 1 << ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET;
}
out.write(v);
if (hasName) {
FSImageSerialization.writeString(e.getName(), out);
}
}
}
}
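
To make the single-byte layout above easier to trace, here is a self-contained sketch that packs and unpacks one entry with the same offsets and masks. The entry values are made up for illustration; this is not the AclEditLogUtil class itself.

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclPackingDemo {
  public static void main(String[] args) {
    AclEntry entry = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("bruce")                       // example named entry
        .setPermission(FsAction.READ_WRITE)
        .build();
    boolean hasName = entry.getName() != null;
    // permission -> bits 0-2, type -> bits 3-4, scope -> bit 5, name flag -> bit 6
    int v = (entry.getScope().ordinal() << 5)
        | (entry.getType().ordinal() << 3)
        | entry.getPermission().ordinal();
    if (hasName) {
      v |= 1 << 6;
    }
    System.out.printf("packed byte = 0x%02x%n", v); // 0x46 for user:bruce:rw-
    // Unpack the byte the same way read() does.
    FsAction perm = FsAction.values()[v & 7];
    AclEntryType type = AclEntryType.values()[(v >> 3) & 3];
    AclEntryScope scope = AclEntryScope.values()[(v >> 5) & 1];
    System.out.println(scope + ":" + type + ":" + perm
        + " named=" + (((v >> 6) & 1) == 1));
  }
}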
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp { static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp {
int length; int length;
@ -294,6 +383,7 @@ public abstract class FSEditLogOp {
long blockSize; long blockSize;
Block[] blocks; Block[] blocks;
PermissionStatus permissions; PermissionStatus permissions;
List<AclEntry> aclEntries;
String clientName; String clientName;
String clientMachine; String clientMachine;
@ -356,6 +446,11 @@ public abstract class FSEditLogOp {
return (T)this; return (T)this;
} }
<T extends AddCloseOp> T setAclEntries(List<AclEntry> aclEntries) {
this.aclEntries = aclEntries;
return (T)this;
}
<T extends AddCloseOp> T setClientName(String clientName) { <T extends AddCloseOp> T setClientName(String clientName) {
this.clientName = clientName; this.clientName = clientName;
return (T)this; return (T)this;
@ -378,6 +473,7 @@ public abstract class FSEditLogOp {
permissions.write(out); permissions.write(out);
if (this.opCode == OP_ADD) { if (this.opCode == OP_ADD) {
AclEditLogUtil.write(aclEntries, out);
FSImageSerialization.writeString(clientName,out); FSImageSerialization.writeString(clientName,out);
FSImageSerialization.writeString(clientMachine,out); FSImageSerialization.writeString(clientMachine,out);
// write clientId and callId // write clientId and callId
@ -442,6 +538,7 @@ public abstract class FSEditLogOp {
// clientname, clientMachine and block locations of last block. // clientname, clientMachine and block locations of last block.
if (this.opCode == OP_ADD) { if (this.opCode == OP_ADD) {
aclEntries = AclEditLogUtil.read(in, logVersion);
this.clientName = FSImageSerialization.readString(in); this.clientName = FSImageSerialization.readString(in);
this.clientMachine = FSImageSerialization.readString(in); this.clientMachine = FSImageSerialization.readString(in);
// read clientId and callId // read clientId and callId
@ -493,6 +590,8 @@ public abstract class FSEditLogOp {
builder.append(Arrays.toString(blocks)); builder.append(Arrays.toString(blocks));
builder.append(", permissions="); builder.append(", permissions=");
builder.append(permissions); builder.append(permissions);
builder.append(", aclEntries=");
builder.append(aclEntries);
builder.append(", clientName="); builder.append(", clientName=");
builder.append(clientName); builder.append(clientName);
builder.append(", clientMachine="); builder.append(", clientMachine=");
@ -530,6 +629,9 @@ public abstract class FSEditLogOp {
} }
FSEditLogOp.permissionStatusToXml(contentHandler, permissions); FSEditLogOp.permissionStatusToXml(contentHandler, permissions);
if (this.opCode == OP_ADD) { if (this.opCode == OP_ADD) {
if (aclEntries != null) {
appendAclEntriesToXml(contentHandler, aclEntries);
}
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId); appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
} }
} }
@ -555,6 +657,7 @@ public abstract class FSEditLogOp {
this.blocks = new Block[0]; this.blocks = new Block[0];
} }
this.permissions = permissionStatusFromXml(st); this.permissions = permissionStatusFromXml(st);
aclEntries = readAclEntriesFromXml(st);
readRpcIdsFromXml(st); readRpcIdsFromXml(st);
} }
} }
@ -1229,6 +1332,7 @@ public abstract class FSEditLogOp {
String path; String path;
long timestamp; long timestamp;
PermissionStatus permissions; PermissionStatus permissions;
List<AclEntry> aclEntries;
private MkdirOp() { private MkdirOp() {
super(OP_MKDIR); super(OP_MKDIR);
@ -1258,6 +1362,11 @@ public abstract class FSEditLogOp {
return this; return this;
} }
MkdirOp setAclEntries(List<AclEntry> aclEntries) {
this.aclEntries = aclEntries;
return this;
}
@Override @Override
public public
void writeFields(DataOutputStream out) throws IOException { void writeFields(DataOutputStream out) throws IOException {
@ -1266,6 +1375,7 @@ public abstract class FSEditLogOp {
FSImageSerialization.writeLong(timestamp, out); // mtime FSImageSerialization.writeLong(timestamp, out); // mtime
FSImageSerialization.writeLong(timestamp, out); // atime, unused at this FSImageSerialization.writeLong(timestamp, out); // atime, unused at this
permissions.write(out); permissions.write(out);
AclEditLogUtil.write(aclEntries, out);
} }
@Override @Override
@ -1309,6 +1419,7 @@ public abstract class FSEditLogOp {
} }
this.permissions = PermissionStatus.read(in); this.permissions = PermissionStatus.read(in);
aclEntries = AclEditLogUtil.read(in, logVersion);
} }
@Override @Override
@ -1324,6 +1435,8 @@ public abstract class FSEditLogOp {
builder.append(timestamp); builder.append(timestamp);
builder.append(", permissions="); builder.append(", permissions=");
builder.append(permissions); builder.append(permissions);
builder.append(", aclEntries=");
builder.append(aclEntries);
builder.append(", opCode="); builder.append(", opCode=");
builder.append(opCode); builder.append(opCode);
builder.append(", txid="); builder.append(", txid=");
@ -1342,6 +1455,9 @@ public abstract class FSEditLogOp {
XMLUtils.addSaxString(contentHandler, "TIMESTAMP", XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.valueOf(timestamp).toString()); Long.valueOf(timestamp).toString());
FSEditLogOp.permissionStatusToXml(contentHandler, permissions); FSEditLogOp.permissionStatusToXml(contentHandler, permissions);
if (aclEntries != null) {
appendAclEntriesToXml(contentHandler, aclEntries);
}
} }
@Override void fromXml(Stanza st) throws InvalidXmlException { @Override void fromXml(Stanza st) throws InvalidXmlException {
@ -1350,6 +1466,7 @@ public abstract class FSEditLogOp {
this.path = st.getValue("PATH"); this.path = st.getValue("PATH");
this.timestamp = Long.valueOf(st.getValue("TIMESTAMP")); this.timestamp = Long.valueOf(st.getValue("TIMESTAMP"));
this.permissions = permissionStatusFromXml(st); this.permissions = permissionStatusFromXml(st);
aclEntries = readAclEntriesFromXml(st);
} }
} }
@ -3371,6 +3488,50 @@ public abstract class FSEditLogOp {
} }
} }
static class SetAclOp extends FSEditLogOp {
List<AclEntry> aclEntries = Lists.newArrayList();
String src;
private SetAclOp() {
super(OP_SET_ACL);
}
static SetAclOp getInstance() {
return new SetAclOp();
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
AclEditLogProto p = AclEditLogProto.parseDelimitedFrom((DataInputStream)in);
src = p.getSrc();
aclEntries = PBHelper.convertAclEntry(p.getEntriesList());
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
AclEditLogProto.Builder b = AclEditLogProto.newBuilder();
if (src != null)
b.setSrc(src);
b.addAllEntries(PBHelper.convertAclEntryProto(aclEntries));
b.build().writeDelimitedTo(out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
appendAclEntriesToXml(contentHandler, aclEntries);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
src = st.getValue("SRC");
aclEntries = readAclEntriesFromXml(st);
if (aclEntries == null) {
aclEntries = Lists.newArrayList();
}
}
}
static private short readShort(DataInputStream in) throws IOException { static private short readShort(DataInputStream in) throws IOException {
return Short.parseShort(FSImageSerialization.readString(in)); return Short.parseShort(FSImageSerialization.readString(in));
} }
@ -3841,4 +4002,45 @@ public abstract class FSEditLogOp {
short mode = Short.valueOf(st.getValue("MODE")); short mode = Short.valueOf(st.getValue("MODE"));
return new FsPermission(mode); return new FsPermission(mode);
} }
private static void fsActionToXml(ContentHandler contentHandler, FsAction v)
throws SAXException {
XMLUtils.addSaxString(contentHandler, "PERM", v.SYMBOL);
}
private static FsAction fsActionFromXml(Stanza st) throws InvalidXmlException {
FsAction v = FSACTION_SYMBOL_MAP.get(st.getValue("PERM"));
if (v == null)
throw new InvalidXmlException("Invalid value for FsAction");
return v;
}
private static void appendAclEntriesToXml(ContentHandler contentHandler,
List<AclEntry> aclEntries) throws SAXException {
for (AclEntry e : aclEntries) {
contentHandler.startElement("", "", "ENTRY", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "SCOPE", e.getScope().name());
XMLUtils.addSaxString(contentHandler, "TYPE", e.getType().name());
XMLUtils.addSaxString(contentHandler, "NAME", e.getName());
fsActionToXml(contentHandler, e.getPermission());
contentHandler.endElement("", "", "ENTRY");
}
}
private static List<AclEntry> readAclEntriesFromXml(Stanza st) {
List<AclEntry> aclEntries = Lists.newArrayList();
if (!st.hasChildren("ENTRY"))
return null;
List<Stanza> stanzas = st.getChildren("ENTRY");
for (Stanza s : stanzas) {
AclEntry e = new AclEntry.Builder()
.setScope(AclEntryScope.valueOf(s.getValue("SCOPE")))
.setType(AclEntryType.valueOf(s.getValue("TYPE")))
.setName(s.getValue("NAME"))
.setPermission(fsActionFromXml(s)).build();
aclEntries.add(e);
}
return aclEntries;
}
} }


@ -67,8 +67,9 @@ public enum FSEditLogOpCodes {
OP_MODIFY_CACHE_POOL ((byte) 37), OP_MODIFY_CACHE_POOL ((byte) 37),
OP_REMOVE_CACHE_POOL ((byte) 38), OP_REMOVE_CACHE_POOL ((byte) 38),
OP_MODIFY_CACHE_DIRECTIVE ((byte) 39), OP_MODIFY_CACHE_DIRECTIVE ((byte) 39),
OP_ROLLING_UPGRADE_START ((byte) 40), OP_SET_ACL ((byte) 40),
OP_ROLLING_UPGRADE_FINALIZE ((byte) 41), OP_ROLLING_UPGRADE_START ((byte) 41),
OP_ROLLING_UPGRADE_FINALIZE ((byte) 42),
// Note that the current range of the valid OP code is 0~127 // Note that the current range of the valid OP code is 0~127
OP_INVALID ((byte) -1); OP_INVALID ((byte) -1);
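
As the table above shows, OP_SET_ACL takes byte 40 and pushes the rolling-upgrade opcodes to 41 and 42. A minimal standalone sketch of the byte-to-opcode lookup such a table implies; the enum here is illustrative, not the real FSEditLogOpCodes class.

public class OpCodeSketch {
  enum Op {
    SET_ACL((byte) 40),
    ROLLING_UPGRADE_START((byte) 41),
    ROLLING_UPGRADE_FINALIZE((byte) 42),
    INVALID((byte) -1);

    final byte code;

    Op(byte code) {
      this.code = code;
    }

    // Linear scan is fine for a table this small; the real class resolves the
    // on-disk byte back to an enum constant in a broadly similar way.
    static Op fromByte(byte b) {
      for (Op op : values()) {
        if (op.code == b) {
          return op;
        }
      }
      return INVALID;
    }
  }

  public static void main(String[] args) {
    System.out.println(Op.fromByte((byte) 40)); // SET_ACL
  }
}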


@ -782,12 +782,10 @@ public class FSImageFormat {
modificationTime, atime, blocks, replication, blockSize); modificationTime, atime, blocks, replication, blockSize);
if (underConstruction) { if (underConstruction) {
file.toUnderConstruction(clientName, clientMachine, null); file.toUnderConstruction(clientName, clientMachine, null);
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
} else {
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
} }
} else if (numBlocks == -1) { return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
//directory } else if (numBlocks == -1) {
//directory
//read quotas //read quotas
final long nsQuota = in.readLong(); final long nsQuota = in.readLong();
@ -877,8 +875,8 @@ public class FSImageFormat {
final short replication = namesystem.getBlockManager().adjustReplication( final short replication = namesystem.getBlockManager().adjustReplication(
in.readShort()); in.readShort());
final long preferredBlockSize = in.readLong(); final long preferredBlockSize = in.readLong();
return new INodeFileAttributes.SnapshotCopy(name, permissions, modificationTime, return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
accessTime, replication, preferredBlockSize); accessTime, replication, preferredBlockSize);
} }
@ -900,9 +898,9 @@ public class FSImageFormat {
final long dsQuota = in.readLong(); final long dsQuota = in.readLong();
return nsQuota == -1L && dsQuota == -1L? return nsQuota == -1L && dsQuota == -1L?
new INodeDirectoryAttributes.SnapshotCopy(name, permissions, modificationTime) new INodeDirectoryAttributes.SnapshotCopy(name, permissions, null, modificationTime)
: new INodeDirectoryAttributes.CopyWithQuota(name, permissions, : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
modificationTime, nsQuota, dsQuota); null, modificationTime, nsQuota, dsQuota);
} }
private void loadFilesUnderConstruction(DataInput in, private void loadFilesUnderConstruction(DataInput in,


@ -30,6 +30,10 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
@ -38,15 +42,18 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString; import com.google.protobuf.ByteString;
@InterfaceAudience.Private @InterfaceAudience.Private
@ -54,6 +61,20 @@ public final class FSImageFormatPBINode {
private final static long USER_GROUP_STRID_MASK = (1 << 24) - 1; private final static long USER_GROUP_STRID_MASK = (1 << 24) - 1;
private final static int USER_STRID_OFFSET = 40; private final static int USER_STRID_OFFSET = 40;
private final static int GROUP_STRID_OFFSET = 16; private final static int GROUP_STRID_OFFSET = 16;
private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
private static final int ACL_ENTRY_NAME_OFFSET = 6;
private static final int ACL_ENTRY_TYPE_OFFSET = 3;
private static final int ACL_ENTRY_SCOPE_OFFSET = 5;
private static final int ACL_ENTRY_PERM_MASK = 7;
private static final int ACL_ENTRY_TYPE_MASK = 3;
private static final int ACL_ENTRY_SCOPE_MASK = 1;
private static final FsAction[] FSACTION_VALUES = FsAction.values();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
.values();
private static final Log LOG = LogFactory.getLog(FSImageFormatProtobuf.class); private static final Log LOG = LogFactory.getLog(FSImageFormatProtobuf.class);
public final static class Loader { public final static class Loader {
@ -66,13 +87,30 @@ public final class FSImageFormatPBINode {
new FsPermission(perm)); new FsPermission(perm));
} }
public static ImmutableList<AclEntry> loadAclEntries(
AclFeatureProto proto, final String[] stringTable) {
ImmutableList.Builder<AclEntry> b = ImmutableList.builder();
for (int v : proto.getEntriesList()) {
int p = v & ACL_ENTRY_PERM_MASK;
int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK;
int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK;
int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK;
String name = stringTable[nid];
b.add(new AclEntry.Builder().setName(name)
.setPermission(FSACTION_VALUES[p])
.setScope(ACL_ENTRY_SCOPE_VALUES[s])
.setType(ACL_ENTRY_TYPE_VALUES[t]).build());
}
return b.build();
}
public static INodeDirectory loadINodeDirectory(INodeSection.INode n, public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
final String[] stringTable) { LoaderContext state) {
assert n.getType() == INodeSection.INode.Type.DIRECTORY; assert n.getType() == INodeSection.INode.Type.DIRECTORY;
INodeSection.INodeDirectory d = n.getDirectory(); INodeSection.INodeDirectory d = n.getDirectory();
final PermissionStatus permissions = loadPermission(d.getPermission(), final PermissionStatus permissions = loadPermission(d.getPermission(),
stringTable); state.getStringTable());
final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName() final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
.toByteArray(), permissions, d.getModificationTime()); .toByteArray(), permissions, d.getModificationTime());
@ -80,6 +118,11 @@ public final class FSImageFormatPBINode {
if (nsQuota >= 0 || dsQuota >= 0) { if (nsQuota >= 0 || dsQuota >= 0) {
dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota); dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
} }
if (d.hasAcl()) {
dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
state.getStringTable())));
}
return dir; return dir;
} }
@ -181,7 +224,7 @@ public final class FSImageFormatPBINode {
case FILE: case FILE:
return loadINodeFile(n); return loadINodeFile(n);
case DIRECTORY: case DIRECTORY:
return loadINodeDirectory(n, parent.getLoaderContext().getStringTable()); return loadINodeDirectory(n, parent.getLoaderContext());
case SYMLINK: case SYMLINK:
return loadINodeSymlink(n); return loadINodeSymlink(n);
default: default:
@ -195,6 +238,7 @@ public final class FSImageFormatPBINode {
INodeSection.INodeFile f = n.getFile(); INodeSection.INodeFile f = n.getFile();
List<BlockProto> bp = f.getBlocksList(); List<BlockProto> bp = f.getBlocksList();
short replication = (short) f.getReplication(); short replication = (short) f.getReplication();
LoaderContext state = parent.getLoaderContext();
BlockInfo[] blocks = new BlockInfo[bp.size()]; BlockInfo[] blocks = new BlockInfo[bp.size()];
for (int i = 0, e = bp.size(); i < e; ++i) { for (int i = 0, e = bp.size(); i < e; ++i) {
@ -206,6 +250,12 @@ public final class FSImageFormatPBINode {
final INodeFile file = new INodeFile(n.getId(), final INodeFile file = new INodeFile(n.getId(),
n.getName().toByteArray(), permissions, f.getModificationTime(), n.getName().toByteArray(), permissions, f.getModificationTime(),
f.getAccessTime(), blocks, replication, f.getPreferredBlockSize()); f.getAccessTime(), blocks, replication, f.getPreferredBlockSize());
if (f.hasAcl()) {
file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
state.getStringTable())));
}
// under-construction information // under-construction information
if (f.hasFileUC()) { if (f.hasFileUC()) {
INodeSection.FileUnderConstructionFeature uc = f.getFileUC(); INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
@ -227,13 +277,15 @@ public final class FSImageFormatPBINode {
INodeSection.INodeSymlink s = n.getSymlink(); INodeSection.INodeSymlink s = n.getSymlink();
final PermissionStatus permissions = loadPermission(s.getPermission(), final PermissionStatus permissions = loadPermission(s.getPermission(),
parent.getLoaderContext().getStringTable()); parent.getLoaderContext().getStringTable());
return new INodeSymlink(n.getId(), n.getName().toByteArray(), permissions,
0, 0, s.getTarget().toStringUtf8()); INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
permissions, 0, 0, s.getTarget().toStringUtf8());
return sym;
} }
private void loadRootINode(INodeSection.INode p) { private void loadRootINode(INodeSection.INode p) {
INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext() INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
.getStringTable());
final Quota.Counts q = root.getQuotaCounts(); final Quota.Counts q = root.getQuotaCounts();
final long nsQuota = q.get(Quota.NAMESPACE); final long nsQuota = q.get(Quota.NAMESPACE);
final long dsQuota = q.get(Quota.DISKSPACE); final long dsQuota = q.get(Quota.DISKSPACE);
@ -255,27 +307,48 @@ public final class FSImageFormatPBINode {
| n.getFsPermissionShort(); | n.getFsPermissionShort();
} }
private static AclFeatureProto.Builder buildAclEntries(AclFeature f,
final SaverContext.DeduplicationMap<String> map) {
AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
for (AclEntry e : f.getEntries()) {
int v = ((map.getId(e.getName()) & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
| (e.getType().ordinal() << ACL_ENTRY_TYPE_OFFSET)
| (e.getScope().ordinal() << ACL_ENTRY_SCOPE_OFFSET)
| (e.getPermission().ordinal());
b.addEntries(v);
}
return b;
}
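
A worked example of the 32-bit layout shared by buildAclEntries and loadAclEntries: permission in bits 0-2, type in bits 3-4, scope in bit 5, and the string-table id of the entry name in bits 6-29. The string-table id used below is made up for illustration.

import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class FsImageAclPackingDemo {
  public static void main(String[] args) {
    int nameId = 42;                       // assumed index into the fsimage string table
    int v = ((nameId & ((1 << 24) - 1)) << 6)
        | (AclEntryType.GROUP.ordinal() << 3)
        | (AclEntryScope.DEFAULT.ordinal() << 5)
        | FsAction.READ_EXECUTE.ordinal();
    System.out.printf("encoded entry = 0x%08x%n", v);
    // Decode it back the way loadAclEntries does.
    int perm = v & 7;
    int type = (v >> 3) & 3;
    int scope = (v >> 5) & 1;
    int decodedNameId = (v >> 6) & ((1 << 24) - 1);
    System.out.println("perm=" + FsAction.values()[perm]
        + " type=" + AclEntryType.values()[type]
        + " scope=" + AclEntryScope.values()[scope]
        + " nameId=" + decodedNameId);
  }
}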
public static INodeSection.INodeFile.Builder buildINodeFile( public static INodeSection.INodeFile.Builder buildINodeFile(
INodeFileAttributes file, INodeFileAttributes file, final SaverContext state) {
final SaverContext.DeduplicationMap<String> stringMap) {
INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder() INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
.setAccessTime(file.getAccessTime()) .setAccessTime(file.getAccessTime())
.setModificationTime(file.getModificationTime()) .setModificationTime(file.getModificationTime())
.setPermission(buildPermissionStatus(file, stringMap)) .setPermission(buildPermissionStatus(file, state.getStringMap()))
.setPreferredBlockSize(file.getPreferredBlockSize()) .setPreferredBlockSize(file.getPreferredBlockSize())
.setReplication(file.getFileReplication()); .setReplication(file.getFileReplication());
AclFeature f = file.getAclFeature();
if (f != null) {
b.setAcl(buildAclEntries(f, state.getStringMap()));
}
return b; return b;
} }
public static INodeSection.INodeDirectory.Builder buildINodeDirectory( public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
INodeDirectoryAttributes dir, INodeDirectoryAttributes dir, final SaverContext state) {
final SaverContext.DeduplicationMap<String> stringMap) {
Quota.Counts quota = dir.getQuotaCounts(); Quota.Counts quota = dir.getQuotaCounts();
INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
.newBuilder().setModificationTime(dir.getModificationTime()) .newBuilder().setModificationTime(dir.getModificationTime())
.setNsQuota(quota.get(Quota.NAMESPACE)) .setNsQuota(quota.get(Quota.NAMESPACE))
.setDsQuota(quota.get(Quota.DISKSPACE)) .setDsQuota(quota.get(Quota.DISKSPACE))
.setPermission(buildPermissionStatus(dir, stringMap)); .setPermission(buildPermissionStatus(dir, state.getStringMap()));
AclFeature f = dir.getAclFeature();
if (f != null) {
b.setAcl(buildAclEntries(f, state.getStringMap()));
}
return b; return b;
} }
@ -376,7 +449,7 @@ public final class FSImageFormatPBINode {
private void save(OutputStream out, INodeDirectory n) throws IOException { private void save(OutputStream out, INodeDirectory n) throws IOException {
INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n, INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
parent.getSaverContext().getStringMap()); parent.getSaverContext());
INodeSection.INode r = buildINodeCommon(n) INodeSection.INode r = buildINodeCommon(n)
.setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build(); .setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
r.writeDelimitedTo(out); r.writeDelimitedTo(out);
@ -384,7 +457,7 @@ public final class FSImageFormatPBINode {
private void save(OutputStream out, INodeFile n) throws IOException { private void save(OutputStream out, INodeFile n) throws IOException {
INodeSection.INodeFile.Builder b = buildINodeFile(n, INodeSection.INodeFile.Builder b = buildINodeFile(n,
parent.getSaverContext().getStringMap()); parent.getSaverContext());
for (Block block : n.getBlocks()) { for (Block block : n.getBlocks()) {
b.addBlocks(PBHelper.convert(block)); b.addBlocks(PBHelper.convert(block));
@ -405,10 +478,12 @@ public final class FSImageFormatPBINode {
} }
private void save(OutputStream out, INodeSymlink n) throws IOException { private void save(OutputStream out, INodeSymlink n) throws IOException {
SaverContext state = parent.getSaverContext();
INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
.newBuilder() .newBuilder()
.setPermission(buildPermissionStatus(n, parent.getSaverContext().getStringMap())) .setPermission(buildPermissionStatus(n, state.getStringMap()))
.setTarget(ByteString.copyFrom(n.getSymlink())); .setTarget(ByteString.copyFrom(n.getSymlink()));
INodeSection.INode r = buildINodeCommon(n) INodeSection.INode r = buildINodeCommon(n)
.setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build(); .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
r.writeDelimitedTo(out); r.writeDelimitedTo(out);


@ -115,9 +115,11 @@ public final class FSImageFormatProtobuf {
return map.entrySet(); return map.entrySet();
} }
} }
private final DeduplicationMap<String> stringMap = DeduplicationMap.newMap();
private final ArrayList<INodeReference> refList = Lists.newArrayList(); private final ArrayList<INodeReference> refList = Lists.newArrayList();
private final DeduplicationMap<String> stringMap = DeduplicationMap
.newMap();
public DeduplicationMap<String> getStringMap() { public DeduplicationMap<String> getStringMap() {
return stringMap; return stringMap;
} }
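
For context, a hedged sketch of what a string deduplication map of this kind does for the fsimage writer: hand out one stable integer id per distinct string (for example ACL entry names), so repeated names are stored only once in the string table. The id numbering below is an assumption for illustration, not the real DeduplicationMap implementation.

import java.util.LinkedHashMap;
import java.util.Map;

public class DeduplicationMapSketch {
  private final Map<String, Integer> ids = new LinkedHashMap<String, Integer>();

  int getId(String value) {
    Integer id = ids.get(value);
    if (id == null) {
      id = ids.size() + 1;        // assumed: ids start at 1
      ids.put(value, id);
    }
    return id;
  }

  public static void main(String[] args) {
    DeduplicationMapSketch map = new DeduplicationMapSketch();
    System.out.println(map.getId("bruce"));   // 1
    System.out.println(map.getId("staff"));   // 2
    System.out.println(map.getId("bruce"));   // 1 again: deduplicated
  }
}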
@ -552,6 +554,7 @@ public final class FSImageFormatProtobuf {
public enum SectionName { public enum SectionName {
NS_INFO("NS_INFO"), NS_INFO("NS_INFO"),
STRING_TABLE("STRING_TABLE"), STRING_TABLE("STRING_TABLE"),
EXTENDED_ACL("EXTENDED_ACL"),
INODE("INODE"), INODE("INODE"),
INODE_REFERENCE("INODE_REFERENCE"), INODE_REFERENCE("INODE_REFERENCE"),
SNAPSHOT("SNAPSHOT"), SNAPSHOT("SNAPSHOT"),


@ -142,6 +142,8 @@ import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
@ -510,7 +512,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private INodeId inodeId; private INodeId inodeId;
private final RetryCache retryCache; private final RetryCache retryCache;
private final AclConfigFlag aclConfigFlag;
/** /**
* Set the last allocated inode id when fsimage or editlog is loaded. * Set the last allocated inode id when fsimage or editlog is loaded.
*/ */
@ -778,6 +782,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
this.isDefaultAuditLogger = auditLoggers.size() == 1 && this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
auditLoggers.get(0) instanceof DefaultAuditLogger; auditLoggers.get(0) instanceof DefaultAuditLogger;
this.retryCache = ignoreRetryCache ? null : initRetryCache(conf); this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
this.aclConfigFlag = new AclConfigFlag(conf);
} catch(IOException e) { } catch(IOException e) {
LOG.error(getClass().getSimpleName() + " initialization failed.", e); LOG.error(getClass().getSimpleName() + " initialization failed.", e);
close(); close();
@ -7470,6 +7475,123 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return results; return results;
} }
void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.modifyAclEntries(src, aclSpec);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "modifyAclEntries", src, null, resultingStat);
}
void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.removeAclEntries(src, aclSpec);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "removeAclEntries", src, null, resultingStat);
}
void removeDefaultAcl(String src) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.removeDefaultAcl(src);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "removeDefaultAcl", src, null, resultingStat);
}
void removeAcl(String src) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot remove ACL on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.removeAcl(src);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "removeAcl", src, null, resultingStat);
}
void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
aclConfigFlag.checkForApiCall();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot set ACL on " + src);
src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src);
dir.setAcl(src, aclSpec);
resultingStat = getAuditFileInfo(src, false);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "setAcl", src, null, resultingStat);
}
AclStatus getAclStatus(String src) throws IOException {
aclConfigFlag.checkForApiCall();
checkOperation(OperationCategory.READ);
readLock();
try {
checkOperation(OperationCategory.READ);
return dir.getAclStatus(src);
} finally {
readUnlock();
}
}
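
The mutators above share one shape: re-check the operation and safe mode under the write lock, resolve the path, enforce ownership, delegate to FSDirectory, then sync the edit log and write the audit record outside the lock. A hedged client-side sketch that exercises them through the public FileSystem API; the path and user name are examples, and dfs.namenode.acls.enabled=true is assumed on the NameNode.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclMutationExample {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/acl-demo");              // example path
    List<AclEntry> spec = Arrays.asList(
        new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("bruce")                           // example user
            .setPermission(FsAction.READ_WRITE)
            .build());
    fs.modifyAclEntries(path, spec);                    // adds/updates user:bruce:rw-
    System.out.println(fs.getAclStatus(path));
    fs.removeAcl(path);                                 // drops all extended entries again
  }
}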
/** /**
* Default AuditLogger implementation; used when no access logger is * Default AuditLogger implementation; used when no access logger is
* defined in the config file. It can also be explicitly listed in the * defined in the config file. It can also be explicitly listed in the


@ -20,16 +20,21 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.List;
import java.util.Set; import java.util.Set;
import java.util.Stack; import java.util.Stack;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
/** /**
* Class that helps in checking file system permission. * Class that helps in checking file system permission.
@ -42,12 +47,27 @@ class FSPermissionChecker {
static final Log LOG = LogFactory.getLog(UserGroupInformation.class); static final Log LOG = LogFactory.getLog(UserGroupInformation.class);
/** @return a string for throwing {@link AccessControlException} */ /** @return a string for throwing {@link AccessControlException} */
private static String toAccessControlString(INode inode) { private String toAccessControlString(INode inode, int snapshotId,
return "\"" + inode.getFullPathName() + "\":" FsAction access, FsPermission mode) {
+ inode.getUserName() + ":" + inode.getGroupName() return toAccessControlString(inode, snapshotId, access, mode, null);
+ ":" + (inode.isDirectory()? "d": "-") + inode.getFsPermission();
} }
/** @return a string for throwing {@link AccessControlException} */
private String toAccessControlString(INode inode, int snapshotId,
FsAction access, FsPermission mode, List<AclEntry> featureEntries) {
StringBuilder sb = new StringBuilder("Permission denied: ")
.append("user=").append(user).append(", ")
.append("access=").append(access).append(", ")
.append("inode=\"").append(inode.getFullPathName()).append("\":")
.append(inode.getUserName(snapshotId)).append(':')
.append(inode.getGroupName(snapshotId)).append(':')
.append(inode.isDirectory() ? 'd' : '-')
.append(mode);
if (featureEntries != null) {
sb.append(':').append(StringUtils.join(",", featureEntries));
}
return sb.toString();
}
private final UserGroupInformation ugi; private final UserGroupInformation ugi;
private final String user; private final String user;
@ -219,7 +239,20 @@ class FSPermissionChecker {
return; return;
} }
FsPermission mode = inode.getFsPermission(snapshotId); FsPermission mode = inode.getFsPermission(snapshotId);
AclFeature aclFeature = inode.getAclFeature(snapshotId);
if (aclFeature != null) {
List<AclEntry> featureEntries = aclFeature.getEntries();
// It's possible that the inode has a default ACL but no access ACL.
if (featureEntries.get(0).getScope() == AclEntryScope.ACCESS) {
checkAccessAcl(inode, snapshotId, access, mode, featureEntries);
return;
}
}
checkFsPermission(inode, snapshotId, access, mode);
}
private void checkFsPermission(INode inode, int snapshotId, FsAction access,
FsPermission mode) throws AccessControlException {
if (user.equals(inode.getUserName(snapshotId))) { //user class if (user.equals(inode.getUserName(snapshotId))) { //user class
if (mode.getUserAction().implies(access)) { return; } if (mode.getUserAction().implies(access)) { return; }
} }
@ -229,8 +262,88 @@ class FSPermissionChecker {
else { //other class else { //other class
if (mode.getOtherAction().implies(access)) { return; } if (mode.getOtherAction().implies(access)) { return; }
} }
throw new AccessControlException("Permission denied: user=" + user throw new AccessControlException(
+ ", access=" + access + ", inode=" + toAccessControlString(inode)); toAccessControlString(inode, snapshotId, access, mode));
}
/**
* Checks requested access against an Access Control List. This method relies
* on finding the ACL data in the relevant portions of {@link FsPermission} and
* {@link AclFeature} as implemented in the logic of {@link AclStorage}. This
* method also relies on receiving the ACL entries in sorted order. This is
* assumed to be true, because the ACL modification methods in
* {@link AclTransformation} sort the resulting entries.
*
* More specifically, this method depends on these invariants in an ACL:
* - The list must be sorted.
* - Each entry in the list must be unique by scope + type + name.
* - There is exactly one each of the unnamed user/group/other entries.
* - The mask entry must not have a name.
* - The other entry must not have a name.
* - Default entries may be present, but they are ignored during enforcement.
*
* @param inode INode accessed inode
* @param snapshotId int snapshot ID
* @param access FsAction requested permission
* @param mode FsPermission mode from inode
* @param featureEntries List<AclEntry> ACL entries from AclFeature of inode
* @throws AccessControlException if the ACL denies permission
*/
private void checkAccessAcl(INode inode, int snapshotId, FsAction access,
FsPermission mode, List<AclEntry> featureEntries)
throws AccessControlException {
boolean foundMatch = false;
// Use owner entry from permission bits if user is owner.
if (user.equals(inode.getUserName(snapshotId))) {
if (mode.getUserAction().implies(access)) {
return;
}
foundMatch = true;
}
// Check named user and group entries if user was not denied by owner entry.
if (!foundMatch) {
for (AclEntry entry: featureEntries) {
if (entry.getScope() == AclEntryScope.DEFAULT) {
break;
}
AclEntryType type = entry.getType();
String name = entry.getName();
if (type == AclEntryType.USER) {
// Use named user entry with mask from permission bits applied if user
// matches name.
if (user.equals(name)) {
FsAction masked = entry.getPermission().and(mode.getGroupAction());
if (masked.implies(access)) {
return;
}
foundMatch = true;
}
} else if (type == AclEntryType.GROUP) {
// Use group entry (unnamed or named) with mask from permission bits
// applied if user is a member and entry grants access. If user is a
// member of multiple groups that have entries that grant access, then
// it doesn't matter which is chosen, so exit early after first match.
String group = name == null ? inode.getGroupName(snapshotId) : name;
if (groups.contains(group)) {
FsAction masked = entry.getPermission().and(mode.getGroupAction());
if (masked.implies(access)) {
return;
}
foundMatch = true;
}
}
}
}
// Use other entry if user was not denied by an earlier match.
if (!foundMatch && mode.getOtherAction().implies(access)) {
return;
}
throw new AccessControlException(
toAccessControlString(inode, snapshotId, access, mode, featureEntries));
} }
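
Because the mask interaction is the subtle part, here is a self-contained sketch of the same evaluation order written against plain inputs instead of an INode. The owner, group, and entry list are illustrative only.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class AclCheckSketch {

  static boolean permits(String user, Set<String> groups, String owner,
      String owningGroup, FsPermission mode, List<AclEntry> entries,
      FsAction access) {
    boolean foundMatch = false;
    // The owner is decided purely by the user bits of the permission.
    if (user.equals(owner)) {
      if (mode.getUserAction().implies(access)) {
        return true;
      }
      foundMatch = true;
    }
    if (!foundMatch) {
      for (AclEntry e : entries) {
        if (e.getScope() == AclEntryScope.DEFAULT) {
          break; // default entries are not enforced
        }
        // With an ACL present, the group bits of the permission act as the mask.
        FsAction masked = e.getPermission().and(mode.getGroupAction());
        if (e.getType() == AclEntryType.USER && user.equals(e.getName())) {
          if (masked.implies(access)) {
            return true;
          }
          foundMatch = true;
        } else if (e.getType() == AclEntryType.GROUP) {
          String group = e.getName() == null ? owningGroup : e.getName();
          if (groups.contains(group)) {
            if (masked.implies(access)) {
              return true;
            }
            foundMatch = true;
          }
        }
      }
    }
    return !foundMatch && mode.getOtherAction().implies(access);
  }

  public static void main(String[] args) {
    List<AclEntry> entries = Arrays.asList(
        new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER).setName("bruce")
            .setPermission(FsAction.ALL).build());
    // rw-r-----: the r-- group bits serve as the mask for named entries.
    FsPermission mode = new FsPermission((short) 0640);
    Set<String> groups = Collections.emptySet();
    System.out.println(permits("bruce", groups, "alice", "staff",
        mode, entries, FsAction.WRITE)); // false: the mask strips write
  }
}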
/** Guarded by {@link FSNamesystem#readLock()} */ /** Guarded by {@link FSNamesystem#readLock()} */


@ -154,6 +154,31 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
return nodeToUpdate; return nodeToUpdate;
} }
abstract AclFeature getAclFeature(int snapshotId);
@Override
public final AclFeature getAclFeature() {
return getAclFeature(Snapshot.CURRENT_STATE_ID);
}
abstract void addAclFeature(AclFeature aclFeature);
final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.addAclFeature(aclFeature);
return nodeToUpdate;
}
abstract void removeAclFeature();
final INode removeAclFeature(int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.removeAclFeature();
return nodeToUpdate;
}
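
A standalone sketch (all names hypothetical) of the record-then-mutate pattern used by the snapshot-aware helpers above: record the modification for the latest snapshot first, then apply the change to whichever node recordModification returns.

public class RecordThenMutateSketch {

  static class Node {
    Object aclFeature;

    // In HDFS this can return a different object when the inode is replaced
    // as part of snapshot copy-on-write; this stand-in just returns itself.
    Node recordModification(int latestSnapshotId) {
      System.out.println("recorded state for snapshot " + latestSnapshotId);
      return this;
    }

    void addAclFeature(Object f) {
      if (aclFeature != null) {
        throw new IllegalStateException("ACL feature already present");
      }
      aclFeature = f;
    }

    // Mirrors the shape of the helper above: record first, then mutate
    // whichever node the snapshot machinery says is current.
    Node addAclFeature(Object f, int latestSnapshotId) {
      Node nodeToUpdate = recordModification(latestSnapshotId);
      nodeToUpdate.addAclFeature(f);
      return nodeToUpdate;
    }
  }

  public static void main(String[] args) {
    Node inode = new Node();
    inode.addAclFeature(new Object(), 7); // 7 is an arbitrary snapshot id
    System.out.println("acl feature set: " + (inode.aclFeature != null));
  }
}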
/** /**
* @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID}, * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID},
* return this; otherwise return the corresponding snapshot inode. * return this; otherwise return the corresponding snapshot inode.


@ -48,6 +48,9 @@ public interface INodeAttributes {
/** @return the permission information as a long. */ /** @return the permission information as a long. */
public long getPermissionLong(); public long getPermissionLong();
/** @return the ACL feature. */
public AclFeature getAclFeature();
/** @return the modification time. */ /** @return the modification time. */
public long getModificationTime(); public long getModificationTime();
@ -58,13 +61,15 @@ public interface INodeAttributes {
public static abstract class SnapshotCopy implements INodeAttributes { public static abstract class SnapshotCopy implements INodeAttributes {
private final byte[] name; private final byte[] name;
private final long permission; private final long permission;
private final AclFeature aclFeature;
private final long modificationTime; private final long modificationTime;
private final long accessTime; private final long accessTime;
SnapshotCopy(byte[] name, PermissionStatus permissions, SnapshotCopy(byte[] name, PermissionStatus permissions,
long modificationTime, long accessTime) { AclFeature aclFeature, long modificationTime, long accessTime) {
this.name = name; this.name = name;
this.permission = PermissionStatusFormat.toLong(permissions); this.permission = PermissionStatusFormat.toLong(permissions);
this.aclFeature = aclFeature;
this.modificationTime = modificationTime; this.modificationTime = modificationTime;
this.accessTime = accessTime; this.accessTime = accessTime;
} }
@ -72,6 +77,7 @@ public interface INodeAttributes {
SnapshotCopy(INode inode) { SnapshotCopy(INode inode) {
this.name = inode.getLocalNameBytes(); this.name = inode.getLocalNameBytes();
this.permission = inode.getPermissionLong(); this.permission = inode.getPermissionLong();
this.aclFeature = inode.getAclFeature();
this.modificationTime = inode.getModificationTime(); this.modificationTime = inode.getModificationTime();
this.accessTime = inode.getAccessTime(); this.accessTime = inode.getAccessTime();
} }
@ -108,6 +114,11 @@ public interface INodeAttributes {
return permission; return permission;
} }
@Override
public AclFeature getAclFeature() {
return aclFeature;
}
@Override @Override
public final long getModificationTime() { public final long getModificationTime() {
return modificationTime; return modificationTime;


@ -77,8 +77,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
* @param other The INodeDirectory to be copied * @param other The INodeDirectory to be copied
* @param adopt Indicate whether or not need to set the parent field of child * @param adopt Indicate whether or not need to set the parent field of child
* INodes to the new node * INodes to the new node
* @param featuresToCopy any number of features to copy to the new node.
* The method will do a reference copy, not a deep copy.
*/ */
public INodeDirectory(INodeDirectory other, boolean adopt, boolean copyFeatures) { public INodeDirectory(INodeDirectory other, boolean adopt,
Feature... featuresToCopy) {
super(other); super(other);
this.children = other.children; this.children = other.children;
if (adopt && this.children != null) { if (adopt && this.children != null) {
@ -86,9 +89,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
child.setParent(this); child.setParent(this);
} }
} }
if (copyFeatures) { this.features = featuresToCopy;
this.features = other.features;
}
} }
/** @return true unconditionally. */ /** @return true unconditionally. */
@ -145,12 +146,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
* otherwise, return null. * otherwise, return null.
*/ */
public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() { public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() {
for (Feature f : features) { return getFeature(DirectoryWithQuotaFeature.class);
if (f instanceof DirectoryWithQuotaFeature) {
return (DirectoryWithQuotaFeature)f;
}
}
return null;
} }
/** Is this directory with quota? */ /** Is this directory with quota? */
@ -185,12 +181,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
* otherwise, return null. * otherwise, return null.
*/ */
public final DirectoryWithSnapshotFeature getDirectoryWithSnapshotFeature() { public final DirectoryWithSnapshotFeature getDirectoryWithSnapshotFeature() {
for (Feature f : features) { return getFeature(DirectoryWithSnapshotFeature.class);
if (f instanceof DirectoryWithSnapshotFeature) {
return (DirectoryWithSnapshotFeature) f;
}
}
return null;
} }
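
These hunks replace per-feature instanceof loops with a single generic lookup such as getFeature(DirectoryWithQuotaFeature.class). A hedged standalone sketch of that lookup; the Feature types are stand-ins, and the helper is assumed to live on the inode base class rather than being shown in this diff.

public class FeatureLookupSketch {
  interface Feature { }
  static class QuotaFeature implements Feature { }
  static class SnapshotFeature implements Feature { }

  private final Feature[] features = { new QuotaFeature() };

  // Scan the (small) feature array and return the first instance of the
  // requested type, or null when this inode does not carry that feature.
  <T extends Feature> T getFeature(Class<T> clazz) {
    for (Feature f : features) {
      if (clazz.isInstance(f)) {
        return clazz.cast(f);
      }
    }
    return null;
  }

  public static void main(String[] args) {
    FeatureLookupSketch inode = new FeatureLookupSketch();
    System.out.println(inode.getFeature(QuotaFeature.class));    // an instance
    System.out.println(inode.getFeature(SnapshotFeature.class)); // null
  }
}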
/** Is this file has the snapshot feature? */ /** Is this file has the snapshot feature? */
@ -231,7 +222,8 @@ public class INodeDirectory extends INodeWithAdditionalFields
public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) { public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
Preconditions.checkState(getClass() != INodeDirectory.class, Preconditions.checkState(getClass() != INodeDirectory.class,
"the class is already INodeDirectory, this=%s", this); "the class is already INodeDirectory, this=%s", this);
return replaceSelf(new INodeDirectory(this, true, true), inodeMap); return replaceSelf(new INodeDirectory(this, true, this.getFeatures()),
inodeMap);
} }
/** Replace itself with the given directory. */ /** Replace itself with the given directory. */

View File

@ -35,8 +35,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
public static class SnapshotCopy extends INodeAttributes.SnapshotCopy public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
implements INodeDirectoryAttributes { implements INodeDirectoryAttributes {
public SnapshotCopy(byte[] name, PermissionStatus permissions, public SnapshotCopy(byte[] name, PermissionStatus permissions,
long modificationTime) { AclFeature aclFeature, long modificationTime) {
super(name, permissions, modificationTime, 0L); super(name, permissions, aclFeature, modificationTime, 0L);
} }
public SnapshotCopy(INodeDirectory dir) { public SnapshotCopy(INodeDirectory dir) {
@ -62,8 +62,9 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
public CopyWithQuota(byte[] name, PermissionStatus permissions, public CopyWithQuota(byte[] name, PermissionStatus permissions,
long modificationTime, long nsQuota, long dsQuota) { AclFeature aclFeature, long modificationTime, long nsQuota,
super(name, permissions, modificationTime); long dsQuota) {
super(name, permissions, aclFeature, modificationTime);
this.nsQuota = nsQuota; this.nsQuota = nsQuota;
this.dsQuota = dsQuota; this.dsQuota = dsQuota;
} }

View File

@ -151,12 +151,7 @@ public class INodeFile extends INodeWithAdditionalFields
* otherwise, return null. * otherwise, return null.
*/ */
public final FileUnderConstructionFeature getFileUnderConstructionFeature() { public final FileUnderConstructionFeature getFileUnderConstructionFeature() {
for (Feature f : features) { return getFeature(FileUnderConstructionFeature.class);
if (f instanceof FileUnderConstructionFeature) {
return (FileUnderConstructionFeature) f;
}
}
return null;
} }
/** Is this file under construction? */ /** Is this file under construction? */
@ -265,12 +260,7 @@ public class INodeFile extends INodeWithAdditionalFields
* otherwise, return null. * otherwise, return null.
*/ */
public final FileWithSnapshotFeature getFileWithSnapshotFeature() { public final FileWithSnapshotFeature getFileWithSnapshotFeature() {
for (Feature f: features) { return getFeature(FileWithSnapshotFeature.class);
if (f instanceof FileWithSnapshotFeature) {
return (FileWithSnapshotFeature) f;
}
}
return null;
} }
/** Is this file has the snapshot feature? */ /** Is this file has the snapshot feature? */

View File

@ -41,9 +41,9 @@ public interface INodeFileAttributes extends INodeAttributes {
private final long header; private final long header;
public SnapshotCopy(byte[] name, PermissionStatus permissions, public SnapshotCopy(byte[] name, PermissionStatus permissions,
long modificationTime, long accessTime, AclFeature aclFeature, long modificationTime, long accessTime,
short replication, long preferredBlockSize) { short replication, long preferredBlockSize) {
super(name, permissions, modificationTime, accessTime); super(name, permissions, aclFeature, modificationTime, accessTime);
final long h = HeaderFormat.combineReplication(0L, replication); final long h = HeaderFormat.combineReplication(0L, replication);
header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize); header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);

View File

@ -213,6 +213,22 @@ public abstract class INodeReference extends INode {
public final FsPermission getFsPermission(int snapshotId) { public final FsPermission getFsPermission(int snapshotId) {
return referred.getFsPermission(snapshotId); return referred.getFsPermission(snapshotId);
} }
@Override
final AclFeature getAclFeature(int snapshotId) {
return referred.getAclFeature(snapshotId);
}
@Override
final void addAclFeature(AclFeature aclFeature) {
referred.addAclFeature(aclFeature);
}
@Override
final void removeAclFeature() {
referred.removeAclFeature();
}
@Override @Override
public final short getFsPermissionShort() { public final short getFsPermissionShort() {
return referred.getFsPermissionShort(); return referred.getFsPermissionShort();

View File

@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement; import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
@ -219,6 +220,15 @@ public abstract class INodeWithAdditionalFields extends INode
return permission; return permission;
} }
@Override
final AclFeature getAclFeature(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getAclFeature();
}
return getFeature(AclFeature.class);
}
@Override @Override
final long getModificationTime(int snapshotId) { final long getModificationTime(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) { if (snapshotId != Snapshot.CURRENT_STATE_ID) {
@ -305,4 +315,33 @@ public abstract class INodeWithAdditionalFields extends INode
+ f.getClass().getSimpleName() + " not found."); + f.getClass().getSimpleName() + " not found.");
features = arr; features = arr;
} }
protected <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
for (Feature f : features) {
if (f.getClass() == clazz) {
@SuppressWarnings("unchecked")
T ret = (T) f;
return ret;
}
}
return null;
}
public void removeAclFeature() {
AclFeature f = getAclFeature();
Preconditions.checkNotNull(f);
removeFeature(f);
}
public void addAclFeature(AclFeature f) {
AclFeature f1 = getAclFeature();
if (f1 != null)
throw new IllegalStateException("Duplicated ACLFeature");
addFeature(f);
}
public final Feature[] getFeatures() {
return features;
}
} }
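For reference, a minimal self-contained sketch of the feature-array pattern shown above; the Feature and AclFeature stubs below are simplified stand-ins rather than the Hadoop classes, and only the lookup and append logic mirrors the diff.

+--
// Illustrative sketch only: a simplified inode that stores optional
// features in an array and looks them up by exact class, mirroring the
// getFeature/addAclFeature logic in the change above.
interface Feature {}

final class AclFeature implements Feature {
  // entries omitted for brevity
}

class SimpleInode {
  private Feature[] features = new Feature[0];

  @SuppressWarnings("unchecked")
  <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
    for (Feature f : features) {
      if (f.getClass() == clazz) {
        return (T) f;             // exact class match, as in the diff
      }
    }
    return null;                  // feature not present
  }

  void addAclFeature(AclFeature f) {
    if (getFeature(AclFeature.class) != null) {
      throw new IllegalStateException("Duplicated ACLFeature");
    }
    Feature[] arr = new Feature[features.length + 1];
    System.arraycopy(features, 0, arr, 0, features.length);
    arr[features.length] = f;     // append the new feature
    features = arr;
  }
}
+--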

View File

@ -49,6 +49,8 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.HAServiceStatus; import org.apache.hadoop.ha.HAServiceStatus;
@ -1306,5 +1308,37 @@ class NameNodeRpcServer implements NamenodeProtocols {
throws IOException { throws IOException {
return namesystem.listCachePools(prevKey != null ? prevKey : ""); return namesystem.listCachePools(prevKey != null ? prevKey : "");
} }
@Override
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
namesystem.modifyAclEntries(src, aclSpec);
}
@Override
public void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
namesystem.removeAclEntries(src, aclSpec);
}
@Override
public void removeDefaultAcl(String src) throws IOException {
namesystem.removeDefaultAcl(src);
}
@Override
public void removeAcl(String src) throws IOException {
namesystem.removeAcl(src);
}
@Override
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
namesystem.setAcl(src, aclSpec);
}
@Override
public AclStatus getAclStatus(String src) throws IOException {
return namesystem.getAclStatus(src);
}
} }

View File

@ -0,0 +1,93 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
/**
* Groups a list of ACL entries into separate lists for access entries vs.
* default entries.
*/
@InterfaceAudience.Private
final class ScopedAclEntries {
private static final int PIVOT_NOT_FOUND = -1;
private final List<AclEntry> accessEntries;
private final List<AclEntry> defaultEntries;
/**
* Creates a new ScopedAclEntries from the given list. It is assumed that the
* list is already sorted such that all access entries precede all default
* entries.
*
* @param aclEntries List<AclEntry> to separate
*/
public ScopedAclEntries(List<AclEntry> aclEntries) {
int pivot = calculatePivotOnDefaultEntries(aclEntries);
if (pivot != PIVOT_NOT_FOUND) {
accessEntries = pivot != 0 ? aclEntries.subList(0, pivot) :
Collections.<AclEntry>emptyList();
defaultEntries = aclEntries.subList(pivot, aclEntries.size());
} else {
accessEntries = aclEntries;
defaultEntries = Collections.emptyList();
}
}
/**
* Returns access entries.
*
* @return List<AclEntry> containing just access entries, or an empty list if
* there are no access entries
*/
public List<AclEntry> getAccessEntries() {
return accessEntries;
}
/**
* Returns default entries.
*
* @return List<AclEntry> containing just default entries, or an empty list if
* there are no default entries
*/
public List<AclEntry> getDefaultEntries() {
return defaultEntries;
}
/**
* Returns the pivot point in the list between the access entries and the
* default entries. This is the index of the first element in the list that is
* a default entry.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
* @return int pivot point, or -1 if list contains no default entries
*/
private static int calculatePivotOnDefaultEntries(List<AclEntry> aclBuilder) {
for (int i = 0; i < aclBuilder.size(); ++i) {
if (aclBuilder.get(i).getScope() == AclEntryScope.DEFAULT) {
return i;
}
}
return PIVOT_NOT_FOUND;
}
}
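A hypothetical usage sketch of ScopedAclEntries. It assumes the caller lives in the same package (the class is package-private) and uses AclEntry.parseAclSpec, which this change relies on elsewhere; entries are assumed to be pre-sorted with access entries before default entries, as the constructor requires.

+--
package org.apache.hadoop.hdfs.server.namenode;

import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;

public class ScopedAclEntriesExample {
  public static void main(String[] args) {
    // Access entries precede default entries, as the class assumes.
    List<AclEntry> entries = AclEntry.parseAclSpec(
        "user::rwx,user:bruce:rwx,group::r-x,other::r--,"
        + "default:user::rwx,default:group::r-x,default:other::r--", true);
    ScopedAclEntries scoped = new ScopedAclEntries(entries);
    System.out.println("access:  " + scoped.getAccessEntries());
    System.out.println("default: " + scoped.getDefaultEntries());
  }
}
+--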

View File

@ -36,8 +36,11 @@ import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
@ -154,7 +157,7 @@ public class FSImageFormatPBSnapshot {
SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
.parseDelimitedFrom(in); .parseDelimitedFrom(in);
INodeDirectory root = loadINodeDirectory(pbs.getRoot(), INodeDirectory root = loadINodeDirectory(pbs.getRoot(),
parent.getLoaderContext().getStringTable()); parent.getLoaderContext());
int sid = pbs.getSnapshotId(); int sid = pbs.getSnapshotId();
INodeDirectorySnapshottable parent = (INodeDirectorySnapshottable) fsDir INodeDirectorySnapshottable parent = (INodeDirectorySnapshottable) fsDir
.getInode(root.getId()).asDirectory(); .getInode(root.getId()).asDirectory();
@ -197,6 +200,7 @@ public class FSImageFormatPBSnapshot {
private void loadFileDiffList(InputStream in, INodeFile file, int size) private void loadFileDiffList(InputStream in, INodeFile file, int size)
throws IOException { throws IOException {
final FileDiffList diffs = new FileDiffList(); final FileDiffList diffs = new FileDiffList();
final LoaderContext state = parent.getLoaderContext();
for (int i = 0; i < size; i++) { for (int i = 0; i < size; i++) {
SnapshotDiffSection.FileDiff pbf = SnapshotDiffSection.FileDiff SnapshotDiffSection.FileDiff pbf = SnapshotDiffSection.FileDiff
.parseDelimitedFrom(in); .parseDelimitedFrom(in);
@ -204,10 +208,16 @@ public class FSImageFormatPBSnapshot {
if (pbf.hasSnapshotCopy()) { if (pbf.hasSnapshotCopy()) {
INodeSection.INodeFile fileInPb = pbf.getSnapshotCopy(); INodeSection.INodeFile fileInPb = pbf.getSnapshotCopy();
PermissionStatus permission = loadPermission( PermissionStatus permission = loadPermission(
fileInPb.getPermission(), parent.getLoaderContext() fileInPb.getPermission(), state.getStringTable());
.getStringTable());
AclFeature acl = null;
if (fileInPb.hasAcl()) {
acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
fileInPb.getAcl(), state.getStringTable()));
}
copy = new INodeFileAttributes.SnapshotCopy(pbf.getName() copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
.toByteArray(), permission, fileInPb.getModificationTime(), .toByteArray(), permission, acl, fileInPb.getModificationTime(),
fileInPb.getAccessTime(), (short) fileInPb.getReplication(), fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
fileInPb.getPreferredBlockSize()); fileInPb.getPreferredBlockSize());
} }
@ -277,6 +287,8 @@ public class FSImageFormatPBSnapshot {
dir.addSnapshotFeature(null); dir.addSnapshotFeature(null);
} }
DirectoryDiffList diffs = dir.getDiffs(); DirectoryDiffList diffs = dir.getDiffs();
final LoaderContext state = parent.getLoaderContext();
for (int i = 0; i < size; i++) { for (int i = 0; i < size; i++) {
// load a directory diff // load a directory diff
SnapshotDiffSection.DirectoryDiff diffInPb = SnapshotDiffSection. SnapshotDiffSection.DirectoryDiff diffInPb = SnapshotDiffSection.
@ -292,15 +304,22 @@ public class FSImageFormatPBSnapshot {
INodeSection.INodeDirectory dirCopyInPb = diffInPb.getSnapshotCopy(); INodeSection.INodeDirectory dirCopyInPb = diffInPb.getSnapshotCopy();
final byte[] name = diffInPb.getName().toByteArray(); final byte[] name = diffInPb.getName().toByteArray();
PermissionStatus permission = loadPermission( PermissionStatus permission = loadPermission(
dirCopyInPb.getPermission(), parent.getLoaderContext() dirCopyInPb.getPermission(), state.getStringTable());
.getStringTable()); AclFeature acl = null;
if (dirCopyInPb.hasAcl()) {
acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
dirCopyInPb.getAcl(), state.getStringTable()));
}
long modTime = dirCopyInPb.getModificationTime(); long modTime = dirCopyInPb.getModificationTime();
boolean noQuota = dirCopyInPb.getNsQuota() == -1 boolean noQuota = dirCopyInPb.getNsQuota() == -1
&& dirCopyInPb.getDsQuota() == -1; && dirCopyInPb.getDsQuota() == -1;
copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name, copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name,
permission, modTime) permission, acl, modTime)
: new INodeDirectoryAttributes.CopyWithQuota(name, permission, : new INodeDirectoryAttributes.CopyWithQuota(name, permission,
modTime, dirCopyInPb.getNsQuota(), dirCopyInPb.getDsQuota()); acl, modTime, dirCopyInPb.getNsQuota(),
dirCopyInPb.getDsQuota());
} }
// load created list // load created list
List<INode> clist = loadCreatedList(in, dir, List<INode> clist = loadCreatedList(in, dir,
@ -355,7 +374,7 @@ public class FSImageFormatPBSnapshot {
SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot
.newBuilder().setSnapshotId(s.getId()); .newBuilder().setSnapshotId(s.getId());
INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot, INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot,
parent.getSaverContext().getStringMap()); parent.getSaverContext());
INodeSection.INode r = INodeSection.INode.newBuilder() INodeSection.INode r = INodeSection.INode.newBuilder()
.setId(sroot.getId()) .setId(sroot.getId())
.setType(INodeSection.INode.Type.DIRECTORY) .setType(INodeSection.INode.Type.DIRECTORY)
@ -443,7 +462,7 @@ public class FSImageFormatPBSnapshot {
INodeFileAttributes copy = diff.snapshotINode; INodeFileAttributes copy = diff.snapshotINode;
if (copy != null) { if (copy != null) {
fb.setName(ByteString.copyFrom(copy.getLocalNameBytes())) fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
.setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext().getStringMap())); .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
} }
fb.build().writeDelimitedTo(out); fb.build().writeDelimitedTo(out);
} }
@ -480,7 +499,7 @@ public class FSImageFormatPBSnapshot {
if (!diff.isSnapshotRoot() && copy != null) { if (!diff.isSnapshotRoot() && copy != null) {
db.setName(ByteString.copyFrom(copy.getLocalNameBytes())) db.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
.setSnapshotCopy( .setSnapshotCopy(
buildINodeDirectory(copy, parent.getSaverContext().getStringMap())); buildINodeDirectory(copy, parent.getSaverContext()));
} }
// process created list and deleted list // process created list and deleted list
List<INode> created = diff.getChildrenDiff() List<INode> created = diff.getChildrenDiff()

View File

@ -184,7 +184,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
private int snapshotQuota = SNAPSHOT_LIMIT; private int snapshotQuota = SNAPSHOT_LIMIT;
public INodeDirectorySnapshottable(INodeDirectory dir) { public INodeDirectorySnapshottable(INodeDirectory dir) {
super(dir, true, true); super(dir, true, dir.getFeatures());
// add snapshot feature if the original directory does not have it // add snapshot feature if the original directory does not have it
if (!isWithSnapshot()) { if (!isWithSnapshot()) {
addSnapshotFeature(null); addSnapshotFeature(null);

View File

@ -21,6 +21,7 @@ import java.io.DataInput;
import java.io.DataOutput; import java.io.DataOutput;
import java.io.IOException; import java.io.IOException;
import java.text.SimpleDateFormat; import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Comparator; import java.util.Comparator;
import java.util.Date; import java.util.Date;
@ -28,12 +29,16 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
/** Snapshot of a sub-tree in the namesystem. */ /** Snapshot of a sub-tree in the namesystem. */
@InterfaceAudience.Private @InterfaceAudience.Private
public class Snapshot implements Comparable<byte[]> { public class Snapshot implements Comparable<byte[]> {
@ -139,7 +144,10 @@ public class Snapshot implements Comparable<byte[]> {
/** The root directory of the snapshot. */ /** The root directory of the snapshot. */
static public class Root extends INodeDirectory { static public class Root extends INodeDirectory {
Root(INodeDirectory other) { Root(INodeDirectory other) {
super(other, false, false); // Always preserve ACL.
super(other, false, Lists.newArrayList(
Iterables.filter(Arrays.asList(other.getFeatures()), AclFeature.class))
.toArray(new Feature[0]));
} }
@Override @Override

View File

@ -53,6 +53,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@ -71,6 +72,7 @@ import org.apache.hadoop.hdfs.web.ParamFilter;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam; import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam; import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam; import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam; import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
@ -315,12 +317,14 @@ public class NamenodeWebHdfsMethods {
@QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT) @QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT)
final CreateParentParam createParent, final CreateParentParam createParent,
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT) @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
final TokenArgumentParam delegationTokenArgument final TokenArgumentParam delegationTokenArgument,
) throws IOException, InterruptedException { @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
final AclPermissionParam aclPermission
)throws IOException, InterruptedException {
return put(ugi, delegation, username, doAsUser, ROOT, op, destination, return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
owner, group, permission, overwrite, bufferSize, replication, owner, group, permission, overwrite, bufferSize, replication,
blockSize, modificationTime, accessTime, renameOptions, createParent, blockSize, modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument); delegationTokenArgument,aclPermission);
} }
/** Handle HTTP PUT request. */ /** Handle HTTP PUT request. */
@ -364,12 +368,14 @@ public class NamenodeWebHdfsMethods {
@QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT) @QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT)
final CreateParentParam createParent, final CreateParentParam createParent,
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT) @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
final TokenArgumentParam delegationTokenArgument final TokenArgumentParam delegationTokenArgument,
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
final AclPermissionParam aclPermission
) throws IOException, InterruptedException { ) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, destination, owner, init(ugi, delegation, username, doAsUser, path, op, destination, owner,
group, permission, overwrite, bufferSize, replication, blockSize, group, permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, delegationTokenArgument); modificationTime, accessTime, renameOptions, delegationTokenArgument,aclPermission);
return ugi.doAs(new PrivilegedExceptionAction<Response>() { return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override @Override
@ -380,7 +386,7 @@ public class NamenodeWebHdfsMethods {
path.getAbsolutePath(), op, destination, owner, group, path.getAbsolutePath(), op, destination, owner, group,
permission, overwrite, bufferSize, replication, blockSize, permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, createParent, modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument); delegationTokenArgument,aclPermission);
} finally { } finally {
REMOTE_ADDRESS.set(null); REMOTE_ADDRESS.set(null);
} }
@ -407,7 +413,8 @@ public class NamenodeWebHdfsMethods {
final AccessTimeParam accessTime, final AccessTimeParam accessTime,
final RenameOptionSetParam renameOptions, final RenameOptionSetParam renameOptions,
final CreateParentParam createParent, final CreateParentParam createParent,
final TokenArgumentParam delegationTokenArgument final TokenArgumentParam delegationTokenArgument,
final AclPermissionParam aclPermission
) throws IOException, URISyntaxException { ) throws IOException, URISyntaxException {
final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF); final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
@ -487,6 +494,26 @@ public class NamenodeWebHdfsMethods {
np.cancelDelegationToken(token); np.cancelDelegationToken(token);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
} }
case MODIFYACLENTRIES: {
np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEACLENTRIES: {
np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEDEFAULTACL: {
np.removeDefaultAcl(fullpath);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEACL: {
np.removeAcl(fullpath);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETACL: {
np.setAcl(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default: default:
throw new UnsupportedOperationException(op + " is not supported"); throw new UnsupportedOperationException(op + " is not supported");
} }
@ -727,6 +754,15 @@ public class NamenodeWebHdfsMethods {
WebHdfsFileSystem.getHomeDirectoryString(ugi)); WebHdfsFileSystem.getHomeDirectoryString(ugi));
return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
} }
case GETACLSTATUS: {
AclStatus status = np.getAclStatus(fullpath);
if (status == null) {
throw new FileNotFoundException("File does not exist: " + fullpath);
}
final String js = JsonUtil.toJsonString(status);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
default: default:
throw new UnsupportedOperationException(op + " is not supported"); throw new UnsupportedOperationException(op + " is not supported");
} }

View File

@ -749,6 +749,7 @@ class ImageLoaderCurrent implements ImageLoader {
processPermission(in, v); processPermission(in, v);
} else if (numBlocks == -2) { } else if (numBlocks == -2) {
v.visit(ImageElement.SYMLINK, Text.readString(in)); v.visit(ImageElement.SYMLINK, Text.readString(in));
processPermission(in, v);
} else if (numBlocks == -3) { // reference node } else if (numBlocks == -3) { // reference node
final boolean isWithName = in.readBoolean(); final boolean isWithName = in.readBoolean();
int snapshotId = in.readInt(); int snapshotId = in.readInt();

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs.web; package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.*;
@ -613,4 +615,44 @@ public class JsonUtil {
return checksum; return checksum;
} }
/** Convert a AclStatus object to a Json string. */
public static String toJsonString(final AclStatus status) {
if (status == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("owner", status.getOwner());
m.put("group", status.getGroup());
m.put("stickyBit", status.isStickyBit());
m.put("entries", status.getEntries());
final Map<String, Map<String, Object>> finalMap =
new TreeMap<String, Map<String, Object>>();
finalMap.put(AclStatus.class.getSimpleName(), m);
return JSON.toString(finalMap);
}
/** Convert a Json map to a AclStatus object. */
public static AclStatus toAclStatus(final Map<?, ?> json) {
if (json == null) {
return null;
}
final Map<?, ?> m = (Map<?, ?>) json.get(AclStatus.class.getSimpleName());
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
aclStatusBuilder.owner((String) m.get("owner"));
aclStatusBuilder.group((String) m.get("group"));
aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
final Object[] entries = (Object[]) m.get("entries");
List<AclEntry> aclEntryList = new ArrayList<AclEntry>();
for (int i = 0; i < entries.length; i++) {
AclEntry aclEntry = AclEntry.parseAclEntry((String) entries[i], true);
aclEntryList.add(aclEntry);
}
aclStatusBuilder.addEntries(aclEntryList);
return aclStatusBuilder.build();
}
} }
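A small, hedged usage sketch for the two helpers above. The builder calls mirror those used inside toAclStatus; the example path of serializing a hand-built AclStatus is illustrative, since in WebHDFS the JSON is produced on the NameNode and parsed back on the client.

+--
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.web.JsonUtil;

public class AclStatusJsonExample {
  public static void main(String[] args) {
    // Build an AclStatus with the same builder calls toAclStatus() uses.
    AclStatus status = new AclStatus.Builder()
        .owner("hadoop")
        .group("supergroup")
        .stickyBit(false)
        .addEntries(AclEntry.parseAclSpec("user:carla:rw-,group::r-x", true))
        .build();

    // Serialize to the JSON form returned by the GETACLSTATUS operation;
    // toAclStatus() performs the inverse on the parsed JSON map.
    System.out.println(JsonUtil.toJsonString(status));
  }
}
+--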

View File

@ -49,6 +49,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
@ -57,6 +59,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam; import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam; import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam; import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam; import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
@ -697,6 +700,17 @@ public class WebHdfsFileSystem extends FileSystem
f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory())); f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
} }
@Override
public AclStatus getAclStatus(Path f) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETACLSTATUS;
final Map<?, ?> json = run(op, f);
AclStatus status = JsonUtil.toAclStatus(json);
if (status == null) {
throw new FileNotFoundException("File does not exist: " + f);
}
return status;
}
@Override @Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException { public boolean mkdirs(Path f, FsPermission permission) throws IOException {
statistics.incrementWriteOps(1); statistics.incrementWriteOps(1);
@ -757,6 +771,44 @@ public class WebHdfsFileSystem extends FileSystem
run(op, p, new PermissionParam(permission)); run(op, p, new PermissionParam(permission));
} }
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.MODIFYACLENTRIES;
run(op, path, new AclPermissionParam(aclSpec));
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.REMOVEACLENTRIES;
run(op, path, new AclPermissionParam(aclSpec));
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.REMOVEDEFAULTACL;
run(op, path);
}
@Override
public void removeAcl(Path path) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.REMOVEACL;
run(op, path);
}
@Override
public void setAcl(final Path p, final List<AclEntry> aclSpec)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.SETACL;
run(op, p, new AclPermissionParam(aclSpec));
}
@Override @Override
public boolean setReplication(final Path p, final short replication public boolean setReplication(final Path p, final short replication
) throws IOException { ) throws IOException {

View File

@ -0,0 +1,71 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.commons.lang.StringUtils;
/** AclPermission parameter. */
public class AclPermissionParam extends StringParam {
/** Parameter name. */
public static final String NAME = "aclspec";
/** Default parameter value. */
public static final String DEFAULT = "";
private static Domain DOMAIN = new Domain(NAME,
Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
/**
* Constructor.
*
* @param str a string representation of the parameter value.
*/
public AclPermissionParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
}
public AclPermissionParam(List<AclEntry> acl) {
super(DOMAIN,parseAclSpec(acl).equals(DEFAULT) ? null : parseAclSpec(acl));
}
@Override
public String getName() {
return NAME;
}
public List<AclEntry> getAclPermission(boolean includePermission) {
final String v = getValue();
return (v != null ? AclEntry.parseAclSpec(v, includePermission) : AclEntry
.parseAclSpec(DEFAULT, includePermission));
}
/**
* Parses a list of AclEntry objects into the comma-separated aclspec string.
*
* @param aclEntry List<AclEntry> to join
* @return String aclspec representation of the entries
*/
private static String parseAclSpec(List<AclEntry> aclEntry) {
return StringUtils.join(aclEntry, ",");
}
}
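A hypothetical round trip through AclPermissionParam. It assumes getValue() is inherited from the Param base class and that AclEntry.toString produces the same "type:name:perm" form that parseAclSpec consumes, as the comma-join in parseAclSpec above implies.

+--
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;

public class AclPermissionParamExample {
  public static void main(String[] args) {
    // Client side: turn a parsed spec into the "aclspec" query parameter.
    List<AclEntry> spec = AclEntry.parseAclSpec(
        "user::rwx,user:bruce:rw-,group::r-x,other::r--", true);
    AclPermissionParam param = new AclPermissionParam(spec);
    System.out.println(param.getName() + "=" + param.getValue());

    // Server side: recover the entries, keeping the permission component.
    List<AclEntry> parsed = param.getAclPermission(true);
    System.out.println(parsed);
  }
}
+--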

View File

@ -35,6 +35,7 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
/** GET_BLOCK_LOCATIONS is a private unstable op. */ /** GET_BLOCK_LOCATIONS is a private unstable op. */
GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK), GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);

View File

@ -37,6 +37,12 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true), RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true), CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
MODIFYACLENTRIES(false, HttpURLConnection.HTTP_OK),
REMOVEACLENTRIES(false, HttpURLConnection.HTTP_OK),
REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK),
REMOVEACL(false, HttpURLConnection.HTTP_OK),
SETACL(false, HttpURLConnection.HTTP_OK),
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final boolean doOutputAndRedirect; final boolean doOutputAndRedirect;

View File

@ -30,6 +30,7 @@ package hadoop.hdfs;
import "Security.proto"; import "Security.proto";
import "hdfs.proto"; import "hdfs.proto";
import "acl.proto";
/** /**
* The ClientNamenodeProtocol Service defines the interface between a client * The ClientNamenodeProtocol Service defines the interface between a client
@ -741,4 +742,16 @@ service ClientNamenodeProtocol {
returns(GetSnapshotDiffReportResponseProto); returns(GetSnapshotDiffReportResponseProto);
rpc isFileClosed(IsFileClosedRequestProto) rpc isFileClosed(IsFileClosedRequestProto)
returns(IsFileClosedResponseProto); returns(IsFileClosedResponseProto);
rpc modifyAclEntries(ModifyAclEntriesRequestProto)
returns(ModifyAclEntriesResponseProto);
rpc removeAclEntries(RemoveAclEntriesRequestProto)
returns(RemoveAclEntriesResponseProto);
rpc removeDefaultAcl(RemoveDefaultAclRequestProto)
returns(RemoveDefaultAclResponseProto);
rpc removeAcl(RemoveAclRequestProto)
returns(RemoveAclResponseProto);
rpc setAcl(SetAclRequestProto)
returns(SetAclResponseProto);
rpc getAclStatus(GetAclStatusRequestProto)
returns(GetAclStatusResponseProto);
} }

View File

@ -0,0 +1,112 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "AclProtos";
option java_generate_equals_and_hash = true;
package hadoop.hdfs;
import "hdfs.proto";
message AclEntryProto {
enum AclEntryScopeProto {
ACCESS = 0x0;
DEFAULT = 0x1;
}
enum AclEntryTypeProto {
USER = 0x0;
GROUP = 0x1;
MASK = 0x2;
OTHER = 0x3;
}
enum FsActionProto {
NONE = 0x0;
EXECUTE = 0x1;
WRITE = 0x2;
WRITE_EXECUTE = 0x3;
READ = 0x4;
READ_EXECUTE = 0x5;
READ_WRITE = 0x6;
PERM_ALL = 0x7;
}
required AclEntryTypeProto type = 1;
required AclEntryScopeProto scope = 2;
required FsActionProto permissions = 3;
optional string name = 4;
}
message AclStatusProto {
required string owner = 1;
required string group = 2;
required bool sticky = 3;
repeated AclEntryProto entries = 4;
}
message AclEditLogProto {
required string src = 1;
repeated AclEntryProto entries = 2;
}
message ModifyAclEntriesRequestProto {
required string src = 1;
repeated AclEntryProto aclSpec = 2;
}
message ModifyAclEntriesResponseProto {
}
message RemoveAclRequestProto {
required string src = 1;
}
message RemoveAclResponseProto {
}
message RemoveAclEntriesRequestProto {
required string src = 1;
repeated AclEntryProto aclSpec = 2;
}
message RemoveAclEntriesResponseProto {
}
message RemoveDefaultAclRequestProto {
required string src = 1;
}
message RemoveDefaultAclResponseProto {
}
message SetAclRequestProto {
required string src = 1;
repeated AclEntryProto aclSpec = 2;
}
message SetAclResponseProto {
}
message GetAclStatusRequestProto {
required string src = 1;
}
message GetAclStatusResponseProto {
required AclStatusProto result = 1;
}

View File

@ -22,6 +22,7 @@ option java_outer_classname = "FsImageProto";
package hadoop.hdfs.fsimage; package hadoop.hdfs.fsimage;
import "hdfs.proto"; import "hdfs.proto";
import "acl.proto";
/** /**
* This file defines the on-disk layout of the file system image. The * This file defines the on-disk layout of the file system image. The
@ -89,6 +90,23 @@ message INodeSection {
optional string clientMachine = 2; optional string clientMachine = 2;
} }
message AclFeatureProto {
/**
* An ACL entry is represented by a 32-bit integer in Big Endian
* format. The bits can be divided into five segments:
* [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
*
* [0:2) -- reserved for future uses.
* [2:26) -- the name of the entry, which is an ID that points to a
* string in the StringTableSection.
* [26:27) -- the scope of the entry (AclEntryScopeProto)
* [27:29) -- the type of the entry (AclEntryTypeProto)
* [29:32) -- the permission of the entry (FsActionProto)
*
*/
repeated fixed32 entries = 2 [packed = true];
}
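For illustration, a worked sketch of the bit layout documented in the AclFeatureProto comment above, assuming bit 0 is the most significant bit of the 32-bit value; the helper names and the exact bit order are assumptions for this example, not the fsimage writer's own code.

+--
// Illustrative packing of one ACL entry into a 32-bit word, following the
// documented segments: 2 reserved bits, 24-bit name id, 1-bit scope,
// 2-bit type, 3-bit permission (bit 0 taken as the most significant bit).
final class AclEntryBits {
  static int encode(int nameId, int scope, int type, int perm) {
    return ((nameId & 0xFFFFFF) << 6)   // [2:26)  name id
         | ((scope  & 0x1) << 5)        // [26:27) scope
         | ((type   & 0x3) << 3)        // [27:29) type
         | (perm    & 0x7);             // [29:32) permission
  }

  static int nameId(int entry) { return (entry >>> 6) & 0xFFFFFF; }
  static int scope(int entry)  { return (entry >>> 5) & 0x1; }
  static int type(int entry)   { return (entry >>> 3) & 0x3; }
  static int perm(int entry)   { return entry & 0x7; }

  public static void main(String[] args) {
    // e.g. scope=ACCESS(0), type=USER(0), permissions=READ_WRITE(6), name id 42
    int e = encode(42, 0, 0, 6);
    System.out.printf("entry=0x%08x perm=%d nameId=%d%n", e, perm(e), nameId(e));
  }
}
+--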
message INodeFile { message INodeFile {
optional uint32 replication = 1; optional uint32 replication = 1;
optional uint64 modificationTime = 2; optional uint64 modificationTime = 2;
@ -97,6 +115,7 @@ message INodeSection {
optional fixed64 permission = 5; optional fixed64 permission = 5;
repeated BlockProto blocks = 6; repeated BlockProto blocks = 6;
optional FileUnderConstructionFeature fileUC = 7; optional FileUnderConstructionFeature fileUC = 7;
optional AclFeatureProto acl = 8;
} }
message INodeDirectory { message INodeDirectory {
@ -106,6 +125,7 @@ message INodeSection {
// diskspace quota // diskspace quota
optional uint64 dsQuota = 3; optional uint64 dsQuota = 3;
optional fixed64 permission = 4; optional fixed64 permission = 4;
optional AclFeatureProto acl = 5;
} }
message INodeSymlink { message INodeSymlink {
@ -281,5 +301,4 @@ message CacheManagerSection {
required uint32 numDirectives = 3; required uint32 numDirectives = 3;
// repeated CachePoolInfoProto pools // repeated CachePoolInfoProto pools
// repeated CacheDirectiveInfoProto directives // repeated CacheDirectiveInfoProto directives
} }

View File

@ -363,6 +363,16 @@
</property> </property>
--> -->
<property>
<name>dfs.namenode.acls.enabled</name>
<value>false</value>
<description>
Set to true to enable support for HDFS ACLs (Access Control Lists). By
default, ACLs are disabled. When ACLs are disabled, the NameNode rejects
all RPCs related to setting or getting ACLs.
</description>
</property>
<property> <property>
<name>dfs.block.access.token.enable</name> <name>dfs.block.access.token.enable</name>
<value>false</value> <value>false</value>

View File

@ -47,6 +47,10 @@ HDFS Permissions Guide
client process, and its group is the group of the parent directory (the client process, and its group is the group of the parent directory (the
BSD rule). BSD rule).
HDFS also provides optional support for POSIX ACLs (Access Control Lists) to
augment file permissions with finer-grained rules for specific named users or
named groups. ACLs are discussed in greater detail later in this document.
Each client process that accesses HDFS has a two-part identity composed Each client process that accesses HDFS has a two-part identity composed
of the user name, and groups list. Whenever HDFS must do a permissions of the user name, and groups list. Whenever HDFS must do a permissions
check for a file or directory foo accessed by a client process, check for a file or directory foo accessed by a client process,
@ -219,9 +223,173 @@ HDFS Permissions Guide
identity matches the super-user, parts of the name space may be identity matches the super-user, parts of the name space may be
inaccessible to the web server. inaccessible to the web server.
* ACLs (Access Control Lists)
In addition to the traditional POSIX permissions model, HDFS also supports
POSIX ACLs (Access Control Lists). ACLs are useful for implementing
permission requirements that differ from the natural organizational hierarchy
of users and groups. An ACL provides a way to set different permissions for
specific named users or named groups, not only the file's owner and the
file's group.
By default, support for ACLs is disabled, and the NameNode disallows creation
of ACLs. To enable support for ACLs, set <<<dfs.namenode.acls.enabled>>> to
true in the NameNode configuration.
An ACL consists of a set of ACL entries. Each ACL entry names a specific
user or group and grants or denies read, write and execute permissions for
that specific user or group. For example:
+--
user::rw-
user:bruce:rwx #effective:r--
group::r-x #effective:r--
group:sales:rwx #effective:r--
mask::r--
other::r--
+--
ACL entries consist of a type, an optional name and a permission string.
For display purposes, ':' is used as the delimiter between each field. In
this example ACL, the file owner has read-write access, the file group has
read-execute access and others have read access. So far, this is equivalent
to setting the file's permission bits to 654.
Additionally, there are 2 extended ACL entries for the named user bruce and
the named group sales, both granted full access. The mask is a special ACL
entry that filters the permissions granted to all named user entries and
named group entries, and also the unnamed group entry. In the example, the
mask has only read permissions, and we can see that the effective permissions
of several ACL entries have been filtered accordingly.
Every ACL must have a mask. If the user doesn't supply a mask while setting
an ACL, then a mask is inserted automatically by calculating the union of
permissions on all entries that would be filtered by the mask.
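As a minimal illustration of that union, the following sketch (with made-up entry values, permissions reduced to rwx bit masks) computes the automatically inserted mask:

+--
public class MaskUnionExample {
  public static void main(String[] args) {
    // rwx bit masks (r=4, w=2, x=1) of the entries the mask filters:
    // named users, named groups and the unnamed group (values illustrative).
    int[] filtered = { 7 /* user:bruce:rwx */, 5 /* group::r-x */, 7 /* group:sales:rwx */ };
    int mask = 0;
    for (int p : filtered) {
      mask |= p;                 // union of all filtered permissions
    }
    System.out.println(mask);    // 7, i.e. rwx
  }
}
+--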
Running <<<chmod>>> on a file that has an ACL actually changes the
permissions of the mask. Since the mask acts as a filter, this effectively
constrains the permissions of all extended ACL entries instead of changing
just the group entry and possibly missing other extended ACL entries.
The model also differentiates between an "access ACL", which defines the
rules to enforce during permission checks, and a "default ACL", which defines
the ACL entries that new child files or sub-directories receive automatically
during creation. For example:
+--
user::rwx
group::r-x
other::r-x
default:user::rwx
default:user:bruce:rwx #effective:r-x
default:group::r-x
default:group:sales:rwx #effective:r-x
default:mask::r-x
default:other::r-x
+--
Only directories may have a default ACL. When a new file or sub-directory is
created, it automatically copies the default ACL of its parent into its own
access ACL. A new sub-directory also copies it to its own default ACL. In
this way, the default ACL will be copied down through arbitrarily deep levels
of the file system tree as new sub-directories get created.
The exact permission values in the new child's access ACL are subject to
filtering by the mode parameter. Considering the default umask of 022, this
is typically 755 for new directories and 644 for new files. The mode
parameter filters the copied permission values for the unnamed user (file
owner), the mask and other. Using this particular example ACL, and creating
a new sub-directory with 755 for the mode, this mode filtering has no effect
on the final result. However, if we consider creation of a file with 644 for
the mode, then mode filtering causes the new file's ACL to receive read-write
for the unnamed user (file owner), read for the mask and read for others.
This mask also means that effective permissions for named user bruce and
named group sales are only read.
Note that the copy occurs at time of creation of the new file or
sub-directory. Subsequent changes to the parent's default ACL do not change
existing children.
The default ACL must have all minimum required ACL entries, including the
unnamed user (file owner), unnamed group (file group) and other entries. If
the user doesn't supply one of these entries while setting a default ACL,
then the entries are inserted automatically by copying the corresponding
permissions from the access ACL, or permission bits if there is no access
ACL. The default ACL must also have a mask. As described above, if the mask
is unspecified, then a mask is inserted automatically by calculating the
union of permissions on all entries that would be filtered by the mask.
When considering a file that has an ACL, the algorithm for permission checks
changes to the following (a minimal sketch of this check appears after the
list):
* If the user name matches the owner of file, then the owner
permissions are tested;
* Else if the user name matches the name in one of the named user entries,
then these permissions are tested, filtered by the mask permissions;
* Else if the group of file matches any member of the groups list,
and if these permissions filtered by the mask grant access, then these
permissions are used;
* Else if there is a named group entry matching a member of the groups list,
and if these permissions filtered by the mask grant access, then these
permissions are used;
* Else if the file group or any named group entry matches a member of the
groups list, but access was not granted by any of those permissions, then
access is denied;
* Otherwise the other permissions of file are tested.
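The sketch below is a simplified Java rendering of that check order, not the NameNode's own code: permissions are reduced to rwx bit masks, and the entry lookups are passed in as plain maps for clarity.

+--
import java.util.List;
import java.util.Map;

// A simplified model of the documented check order; values are illustrative.
final class AclCheckSketch {
  static boolean granted(int perms, int requested) {
    return (perms & requested) == requested;   // requested is an rwx mask
  }

  static boolean check(String user, List<String> groups, int requested,
      String owner, int ownerPerms,
      Map<String, Integer> namedUsers,        // named user entries
      String fileGroup, int groupPerms,       // unnamed group entry
      Map<String, Integer> namedGroups,       // named group entries
      int mask, int otherPerms) {
    if (user.equals(owner)) {
      return granted(ownerPerms, requested);
    }
    Integer named = namedUsers.get(user);
    if (named != null) {
      return granted(named & mask, requested);       // filtered by the mask
    }
    boolean sawGroupEntry = false;
    if (groups.contains(fileGroup)) {
      sawGroupEntry = true;
      if (granted(groupPerms & mask, requested)) {
        return true;
      }
    }
    for (Map.Entry<String, Integer> e : namedGroups.entrySet()) {
      if (groups.contains(e.getKey())) {
        sawGroupEntry = true;
        if (granted(e.getValue() & mask, requested)) {
          return true;
        }
      }
    }
    if (sawGroupEntry) {
      return false;      // a group entry matched but none granted access
    }
    return granted(otherPerms, requested);
  }
}
+--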
Best practice is to rely on traditional permission bits to implement most
permission requirements, and define a smaller number of ACLs to augment the
permission bits with a few exceptional rules. A file with an ACL incurs an
additional cost in memory in the NameNode compared to a file that has only
permission bits.
* ACLs File System API
New methods (a brief usage sketch follows this list):
* <<<public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws
IOException;>>>
* <<<public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws
IOException;>>>
* <<<public void removeDefaultAcl(Path path) throws IOException;>>>
* <<<public void removeAcl(Path path) throws IOException;>>>
* <<<public void setAcl(Path path, List<AclEntry> aclSpec) throws
IOException;>>>
* <<<public AclStatus getAclStatus(Path path) throws IOException;>>>
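A hedged usage sketch of the methods listed above; the path and entries are illustrative, and the default file system is assumed to be an HDFS cluster with <<<dfs.namenode.acls.enabled>>> set to true.

+--
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

public class AclApiExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/acl-demo");        // illustrative path

    // Grant the named user bruce read-write access in addition to the
    // permission bits already on the file.
    List<AclEntry> spec = AclEntry.parseAclSpec("user:bruce:rw-", true);
    fs.modifyAclEntries(file, spec);

    // Read the ACL back; the mask entry is maintained automatically.
    AclStatus status = fs.getAclStatus(file);
    System.out.println(status.getEntries());

    // Remove all extended entries, leaving only the permission bits.
    fs.removeAcl(file);
  }
}
+--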
* ACLs Shell Commands
* <<<hdfs dfs -getfacl [-R] <path> >>>
Displays the Access Control Lists (ACLs) of files and directories. If a
directory has a default ACL, then getfacl also displays the default ACL.
* <<<hdfs dfs -setfacl [-R] [{-b|-k} {-m|-x <acl_spec>} <path>]|[--set <acl_spec> <path>] >>>
Sets Access Control Lists (ACLs) of files and directories.
* <<<hdfs dfs -ls <args> >>>
The output of <<<ls>>> will append a '+' character to the permissions
string of any file or directory that has an ACL.
See the {{{../hadoop-common/FileSystemShell.html}File System Shell}}
documentation for full coverage of these commands.
* Configuration Parameters * Configuration Parameters
* <<<dfs.permissions = true>>> * <<<dfs.permissions.enabled = true>>>
If yes use the permissions system as described here. If no, If yes use the permissions system as described here. If no,
permission checking is turned off, but all other behavior is permission checking is turned off, but all other behavior is
@ -255,3 +423,9 @@ HDFS Permissions Guide
The administrators for the cluster specified as an ACL. This The administrators for the cluster specified as an ACL. This
controls who can access the default servlets, etc. in the HDFS. controls who can access the default servlets, etc. in the HDFS.
* <<<dfs.namenode.acls.enabled = true>>>
Set to true to enable support for HDFS ACLs (Access Control Lists). By
default, ACLs are disabled. When ACLs are disabled, the NameNode rejects
all attempts to set an ACL.

View File

@ -752,6 +752,148 @@ Content-Length: 0
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setTimes {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setTimes
** {Modify ACL Entries}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MODIFYACLENTRIES
&aclspec=<ACLSPEC>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.modifyAclEntries
** {Remove ACL Entries}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEACLENTRIES
&aclspec=<ACLSPEC>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAclEntries
** {Remove Default ACL}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEDEFAULTACL"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeDefaultAcl
** {Remove ACL}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEACL"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAcl
** {Set ACL}
* Submit a HTTP PUT request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETACL
&aclspec=<ACLSPEC>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setAcl
** {Get ACL Status}
* Submit a HTTP GET request.
+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETACLSTATUS"
+---------------------------------
The client receives a response with a {{{ACL Status JSON Schema}<<<AclStatus>>> JSON object}}:
+---------------------------------
HTTP/1.1 200 OK
Content-Type: application/json
Transfer-Encoding: chunked
{
"AclStatus": {
"entries": [
"user:carla:rw-",
"group::r-x"
],
"group": "supergroup",
"owner": "hadoop",
"stickyBit": false
}
}
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
* {Delegation Token Operations} * {Delegation Token Operations}
** {Get Delegation Token} ** {Get Delegation Token}
@ -980,6 +1122,52 @@ Transfer-Encoding: chunked
However, if additional properties are included in the responses, they are
considered as optional properties in order to maintain compatibility.
** {ACL Status JSON Schema}
+---------------------------------
{
"name" : "AclStatus",
"properties":
{
"AclStatus":
{
"type" : "object",
"properties":
{
"entries":
{
"type": "array",
"items":
{
"description": "ACL entry.",
"type": "string"
}
},
"group":
{
"description": "The group owner.",
"type" : "string",
"required" : true
},
"owner":
{
"description": "The user who is the owner.",
"type" : "string",
"required" : true
},
"stickyBit":
{
"description": "True if the sticky bit is on.",
"type" : "boolean",
"required" : true
}
}
}
}
}
+---------------------------------
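On the Java side this schema corresponds to the <<<AclStatus>>> and
<<<AclEntry>>> classes. A non-authoritative sketch that builds the example
response shown earlier (it assumes the builder's owner, group, stickyBit and
addEntry setters, as used elsewhere in this patch):
+---------------------------------
// Sketch only: constructing the AclStatus that the example JSON above describes.
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

public class AclStatusSchemaSketch {
  public static AclStatus sample() {
    return new AclStatus.Builder()
        .owner("hadoop")                  // "owner"
        .group("supergroup")              // "group"
        .stickyBit(false)                 // "stickyBit"
        .addEntry(new AclEntry.Builder()  // "entries": "user:carla:rw-"
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("carla")
            .setPermission(FsAction.READ_WRITE)
            .build())
        .addEntry(new AclEntry.Builder()  // "entries": "group::r-x"
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.GROUP)
            .setPermission(FsAction.READ_EXECUTE)
            .build())
        .build();
  }
}
+---------------------------------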
** {Boolean JSON Schema}
+---------------------------------
@ -1387,6 +1575,23 @@ var tokenProperties =
* {HTTP Query Parameter Dictionary}
** {ACL Spec}
*----------------+-------------------------------------------------------------------+
|| Name | <<<aclspec>>> |
*----------------+-------------------------------------------------------------------+
|| Description | The ACL spec included in ACL modification operations. |
*----------------+-------------------------------------------------------------------+
|| Type | String |
*----------------+-------------------------------------------------------------------+
|| Default Value | \<empty\> |
*----------------+-------------------------------------------------------------------+
|| Valid Values | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. |
*----------------+-------------------------------------------------------------------+
|| Syntax | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. |
*----------------+-------------------------------------------------------------------+
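For orientation only, an ACL spec is a comma-separated list of entries written
in the same textual form as the <<<GETACLSTATUS>>> output above; the values
below are illustrative, and the permissions guide remains the authoritative
reference:
+---------------------------------
aclspec=user::rwx,user:carla:rw-,group::r-x,other::---
aclspec=default:user:carla:rw-
+---------------------------------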
** {Access Time}
*----------------+-------------------------------------------------------------------+
View File
@ -0,0 +1,84 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestAclCLI extends CLITestHelperDFS {
private MiniDFSCluster cluster = null;
private FileSystem fs = null;
private String namenode = null;
private String username = null;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
fs = cluster.getFileSystem();
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
username = System.getProperty("user.name");
}
@After
@Override
public void tearDown() throws Exception {
super.tearDown();
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
@Override
protected String getTestFile() {
return "testAclCLI.xml";
}
@Override
protected String expandCommand(final String cmd) {
String expCmd = cmd;
expCmd = expCmd.replaceAll("NAMENODE", namenode);
expCmd = expCmd.replaceAll("USERNAME", username);
expCmd = expCmd.replaceAll("#LF#",
System.getProperty("line.separator"));
expCmd = super.expandCommand(expCmd);
return expCmd;
}
@Override
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
}
@Test
@Override
public void testAll() {
super.testAll();
}
}
View File
@ -17,15 +17,21 @@
*/ */
package org.apache.hadoop.fs.permission; package org.apache.hadoop.fs.permission;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -33,8 +39,12 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
public class TestStickyBit { public class TestStickyBit {
@ -43,56 +53,89 @@ public class TestStickyBit {
UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"}); UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
static UserGroupInformation user2 = static UserGroupInformation user2 =
UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"}); UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem hdfs;
private static FileSystem hdfsAsUser1;
private static FileSystem hdfsAsUser2;
@BeforeClass
public static void init() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
initCluster(true);
}
private static void initCluster(boolean format) throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
.build();
hdfs = cluster.getFileSystem();
assertTrue(hdfs instanceof DistributedFileSystem);
hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
@Before
public void setup() throws Exception {
if (hdfs != null) {
for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
hdfs.delete(stat.getPath(), true);
}
}
}
@AfterClass
public static void shutdown() throws Exception {
IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2);
if (cluster != null) {
cluster.shutdown();
}
}
/** /**
* Ensure that even if a file is in a directory with the sticky bit on, * Ensure that even if a file is in a directory with the sticky bit on,
* another user can write to that file (assuming correct permissions). * another user can write to that file (assuming correct permissions).
*/ */
private void confirmCanAppend(Configuration conf, FileSystem hdfs, private void confirmCanAppend(Configuration conf, Path p) throws Exception {
Path baseDir) throws IOException, InterruptedException {
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
// Write a file to the new tmp directory as a regular user // Write a file to the new tmp directory as a regular user
hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
Path file = new Path(p, "foo"); Path file = new Path(p, "foo");
writeFile(hdfs, file); writeFile(hdfsAsUser1, file);
hdfs.setPermission(file, new FsPermission((short) 0777)); hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));
// Log onto cluster as another user and attempt to append to file // Log onto cluster as another user and attempt to append to file
hdfs = DFSTestUtil.getFileSystemAs(user2, conf);
Path file2 = new Path(p, "foo"); Path file2 = new Path(p, "foo");
FSDataOutputStream h = hdfs.append(file2); FSDataOutputStream h = null;
h.write("Some more data".getBytes()); try {
h.close(); h = hdfsAsUser2.append(file2);
h.write("Some more data".getBytes());
h.close();
h = null;
} finally {
IOUtils.cleanup(null, h);
}
} }
/** /**
* Test that one user can't delete another user's file when the sticky bit is * Test that one user can't delete another user's file when the sticky bit is
* set. * set.
*/ */
private void confirmDeletingFiles(Configuration conf, FileSystem hdfs, private void confirmDeletingFiles(Configuration conf, Path p)
Path baseDir) throws IOException, InterruptedException { throws Exception {
Path p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
// Write a file to the new temp directory as a regular user // Write a file to the new temp directory as a regular user
hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
Path file = new Path(p, "foo"); Path file = new Path(p, "foo");
writeFile(hdfs, file); writeFile(hdfsAsUser1, file);
// Make sure the correct user is the owner // Make sure the correct user is the owner
assertEquals(user1.getShortUserName(), hdfs.getFileStatus(file).getOwner()); assertEquals(user1.getShortUserName(),
hdfsAsUser1.getFileStatus(file).getOwner());
// Log onto cluster as another user and attempt to delete the file // Log onto cluster as another user and attempt to delete the file
FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf);
try { try {
hdfs2.delete(file, false); hdfsAsUser2.delete(file, false);
fail("Shouldn't be able to delete someone else's file with SB on"); fail("Shouldn't be able to delete someone else's file with SB on");
} catch (IOException ioe) { } catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException); assertTrue(ioe instanceof AccessControlException);
@ -105,13 +148,8 @@ public class TestStickyBit {
* on, the new directory does not automatically get a sticky bit, as is * on, the new directory does not automatically get a sticky bit, as is
* standard Unix behavior * standard Unix behavior
*/ */
private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir) private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path p)
throws IOException { throws IOException {
Path p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
// Create a subdirectory within it // Create a subdirectory within it
Path p2 = new Path(p, "bar"); Path p2 = new Path(p, "bar");
hdfs.mkdirs(p2); hdfs.mkdirs(p2);
@ -123,23 +161,19 @@ public class TestStickyBit {
/** /**
* Test basic ability to get and set sticky bits on files and directories. * Test basic ability to get and set sticky bits on files and directories.
*/ */
private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir) private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
throws IOException { throws IOException {
Path p1 = new Path(baseDir, "roguetraders");
hdfs.mkdirs(p1);
// Initially sticky bit should not be set // Initially sticky bit should not be set
assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit()); assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());
// Same permission, but with sticky bit on // Same permission, but with sticky bit on
short withSB; short withSB;
withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000); withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);
assertTrue((new FsPermission(withSB)).getStickyBit()); assertTrue((new FsPermission(withSB)).getStickyBit());
hdfs.setPermission(p1, new FsPermission(withSB)); hdfs.setPermission(p, new FsPermission(withSB));
assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit()); assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());
// Write a file to the fs, try to set its sticky bit // Write a file to the fs, try to set its sticky bit
Path f = new Path(baseDir, "somefile"); Path f = new Path(baseDir, "somefile");
@ -154,37 +188,78 @@ public class TestStickyBit {
} }
@Test @Test
public void testGeneralSBBehavior() throws IOException, InterruptedException { public void testGeneralSBBehavior() throws Exception {
MiniDFSCluster cluster = null; Path baseDir = new Path("/mcgann");
try { hdfs.mkdirs(baseDir);
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem hdfs = cluster.getFileSystem(); // Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
assertTrue(hdfs instanceof DistributedFileSystem); hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
Path baseDir = new Path("/mcgann"); confirmCanAppend(conf, p);
hdfs.mkdirs(baseDir);
confirmCanAppend(conf, hdfs, baseDir);
baseDir = new Path("/eccleston"); baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir); hdfs.mkdirs(baseDir);
confirmSettingAndGetting(hdfs, baseDir); p = new Path(baseDir, "roguetraders");
baseDir = new Path("/tennant"); hdfs.mkdirs(p);
hdfs.mkdirs(baseDir); confirmSettingAndGetting(hdfs, p, baseDir);
confirmDeletingFiles(conf, hdfs, baseDir);
baseDir = new Path("/smith"); baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir); hdfs.mkdirs(baseDir);
confirmStickyBitDoesntPropagate(hdfs, baseDir); p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
confirmDeletingFiles(conf, p);
} finally { baseDir = new Path("/smith");
if (cluster != null) hdfs.mkdirs(baseDir);
cluster.shutdown(); p = new Path(baseDir, "scissorsisters");
}
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
confirmStickyBitDoesntPropagate(hdfs, baseDir);
}
@Test
public void testAclGeneralSBBehavior() throws Exception {
Path baseDir = new Path("/mcgann");
hdfs.mkdirs(baseDir);
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
applyAcl(p);
confirmCanAppend(conf, p);
baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "roguetraders");
hdfs.mkdirs(p);
applyAcl(p);
confirmSettingAndGetting(hdfs, p, baseDir);
baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
applyAcl(p);
confirmDeletingFiles(conf, p);
baseDir = new Path("/smith");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
applyAcl(p);
confirmStickyBitDoesntPropagate(hdfs, p);
} }
/** /**
@ -192,46 +267,42 @@ public class TestStickyBit {
* bit is set. * bit is set.
*/ */
@Test @Test
public void testMovingFiles() throws IOException, InterruptedException { public void testMovingFiles() throws Exception {
MiniDFSCluster cluster = null; testMovingFiles(false);
}
@Test
public void testAclMovingFiles() throws Exception {
testMovingFiles(true);
}
private void testMovingFiles(boolean useAcl) throws Exception {
// Create a tmp directory with wide-open permissions and sticky bit
Path tmpPath = new Path("/tmp");
Path tmpPath2 = new Path("/tmp2");
hdfs.mkdirs(tmpPath);
hdfs.mkdirs(tmpPath2);
hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
if (useAcl) {
applyAcl(tmpPath);
}
hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
if (useAcl) {
applyAcl(tmpPath2);
}
// Write a file to the new tmp directory as a regular user
Path file = new Path(tmpPath, "foo");
writeFile(hdfsAsUser1, file);
// Log onto cluster as another user and attempt to move the file
try { try {
// Set up cluster for testing hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
Configuration conf = new HdfsConfiguration(); fail("Shouldn't be able to rename someone else's file with SB on");
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); } catch (IOException ioe) {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); assertTrue(ioe instanceof AccessControlException);
FileSystem hdfs = cluster.getFileSystem(); assertTrue(ioe.getMessage().contains("sticky bit"));
assertTrue(hdfs instanceof DistributedFileSystem);
// Create a tmp directory with wide-open permissions and sticky bit
Path tmpPath = new Path("/tmp");
Path tmpPath2 = new Path("/tmp2");
hdfs.mkdirs(tmpPath);
hdfs.mkdirs(tmpPath2);
hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
// Write a file to the new tmp directory as a regular user
Path file = new Path(tmpPath, "foo");
FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf);
writeFile(hdfs2, file);
// Log onto cluster as another user and attempt to move the file
FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
try {
hdfs3.rename(file, new Path(tmpPath2, "renamed"));
fail("Shouldn't be able to rename someone else's file with SB on");
} catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException);
assertTrue(ioe.getMessage().contains("sticky bit"));
}
} finally {
if (cluster != null)
cluster.shutdown();
} }
} }
@ -241,56 +312,91 @@ public class TestStickyBit {
* re-start. * re-start.
*/ */
@Test @Test
public void testStickyBitPersistence() throws IOException { public void testStickyBitPersistence() throws Exception {
MiniDFSCluster cluster = null; // A tale of three directories...
try { Path sbSet = new Path("/Housemartins");
Configuration conf = new HdfsConfiguration(); Path sbNotSpecified = new Path("/INXS");
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); Path sbSetOff = new Path("/Easyworld");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem hdfs = cluster.getFileSystem();
assertTrue(hdfs instanceof DistributedFileSystem); for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
hdfs.mkdirs(p);
// A tale of three directories... // Two directories had their sticky bits set explicitly...
Path sbSet = new Path("/Housemartins"); hdfs.setPermission(sbSet, new FsPermission((short) 01777));
Path sbNotSpecified = new Path("/INXS"); hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
Path sbSetOff = new Path("/Easyworld");
for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff }) shutdown();
hdfs.mkdirs(p);
// Two directories had their sticky bits set explicitly... // Start file system up again
hdfs.setPermission(sbSet, new FsPermission((short) 01777)); initCluster(false);
hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
cluster.shutdown(); assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
// Start file system up again assertTrue(hdfs.exists(sbNotSpecified));
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build(); assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
hdfs = cluster.getFileSystem(); .getStickyBit());
assertTrue(hdfs.exists(sbSet)); assertTrue(hdfs.exists(sbSetOff));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit()); assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
}
assertTrue(hdfs.exists(sbNotSpecified)); @Test
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission() public void testAclStickyBitPersistence() throws Exception {
.getStickyBit()); // A tale of three directories...
Path sbSet = new Path("/Housemartins");
Path sbNotSpecified = new Path("/INXS");
Path sbSetOff = new Path("/Easyworld");
assertTrue(hdfs.exists(sbSetOff)); for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit()); hdfs.mkdirs(p);
} finally { // Two directories had their sticky bits set explicitly...
if (cluster != null) hdfs.setPermission(sbSet, new FsPermission((short) 01777));
cluster.shutdown(); applyAcl(sbSet);
} hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
applyAcl(sbSetOff);
shutdown();
// Start file system up again
initCluster(false);
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
.getStickyBit());
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
} }
/*** /***
* Write a quick file to the specified file system at specified path * Write a quick file to the specified file system at specified path
*/ */
static private void writeFile(FileSystem hdfs, Path p) throws IOException { static private void writeFile(FileSystem hdfs, Path p) throws IOException {
FSDataOutputStream o = hdfs.create(p); FSDataOutputStream o = null;
o.write("some file contents".getBytes()); try {
o.close(); o = hdfs.create(p);
o.write("some file contents".getBytes());
o.close();
o = null;
} finally {
IOUtils.cleanup(null, o);
}
}
/**
* Applies an ACL (both access and default) to the given path.
*
* @param p Path to set
* @throws IOException if an ACL could not be modified
*/
private static void applyAcl(Path p) throws IOException {
hdfs.modifyAclEntries(p, Arrays.asList(
aclEntry(ACCESS, USER, user2.getShortUserName(), ALL),
aclEntry(DEFAULT, USER, user2.getShortUserName(), ALL)));
} }
} }
View File
@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import com.google.common.base.Charsets; import com.google.common.base.Charsets;
import com.google.common.base.Joiner; import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -29,6 +30,7 @@ import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo; import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@ -1076,6 +1078,8 @@ public class DFSTestUtil {
filesystem.removeCacheDirective(id); filesystem.removeCacheDirective(id);
// OP_REMOVE_CACHE_POOL // OP_REMOVE_CACHE_POOL
filesystem.removeCachePool("pool1"); filesystem.removeCachePool("pool1");
// OP_SET_ACL
filesystem.setAcl(pathConcatTarget, Lists.<AclEntry> newArrayList());
} }
public static void abortStream(DFSOutputStream out) throws IOException { public static void abortStream(DFSOutputStream out) throws IOException {
View File
@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@ -65,6 +66,7 @@ public class TestSafeMode {
public void startUp() throws IOException { public void startUp() throws IOException {
conf = new HdfsConfiguration(); conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive(); cluster.waitActive();
fs = cluster.getFileSystem(); fs = cluster.getFileSystem();
@ -328,12 +330,48 @@ public class TestSafeMode {
fs.setTimes(file1, 0, 0); fs.setTimes(file1, 0, 0);
}}); }});
runFsFun("modifyAclEntries while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
}});
runFsFun("removeAclEntries while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
}});
runFsFun("removeDefaultAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeDefaultAcl(file1);
}});
runFsFun("removeAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeAcl(file1);
}});
runFsFun("setAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.setAcl(file1, Lists.<AclEntry>newArrayList());
}});
try { try {
DFSTestUtil.readFile(fs, file1); DFSTestUtil.readFile(fs, file1);
} catch (IOException ioe) { } catch (IOException ioe) {
fail("Set times failed while in SM"); fail("Set times failed while in SM");
} }
try {
fs.getAclStatus(file1);
} catch (IOException ioe) {
fail("getAclStatus failed while in SM");
}
assertFalse("Could not leave SM", assertFalse("Could not leave SM",
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE)); dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
} }
View File
@ -26,6 +26,11 @@ import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
@ -69,6 +74,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum;
import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import com.google.common.base.Joiner; import com.google.common.base.Joiner;
@ -581,4 +587,39 @@ public class TestPBHelper {
assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C), assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),
HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C); HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
} }
@Test
public void testAclEntryProto() {
// All fields populated.
AclEntry e1 = new AclEntry.Builder().setName("test")
.setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
.setType(AclEntryType.OTHER).build();
// No name.
AclEntry e2 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setPermission(FsAction.ALL).build();
// No permission, which will default to the 0'th enum element.
AclEntry e3 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setName("test").build();
AclEntry[] expected = new AclEntry[] { e1, e2,
new AclEntry.Builder()
.setScope(e3.getScope())
.setType(e3.getType())
.setName(e3.getName())
.setPermission(FsAction.NONE)
.build() };
AclEntry[] actual = Lists.newArrayList(
PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists
.newArrayList(e1, e2, e3)))).toArray(new AclEntry[0]);
Assert.assertArrayEquals(expected, actual);
}
@Test
public void testAclStatusProto() {
AclEntry e = new AclEntry.Builder().setName("test")
.setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
.setType(AclEntryType.OTHER).build();
AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e)
.build();
Assert.assertEquals(s, PBHelper.convert(PBHelper.convert(s)));
}
} }
View File
@ -0,0 +1,115 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
/**
* Helper methods useful for writing ACL tests.
*/
public final class AclTestHelpers {
/**
* Create a new AclEntry with scope, type and permission (no name).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param permission FsAction set of permissions in the ACL entry
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
FsAction permission) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setPermission(permission)
.build();
}
/**
* Create a new AclEntry with scope, type, name and permission.
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @param permission FsAction set of permissions in the ACL entry
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
String name, FsAction permission) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setName(name)
.setPermission(permission)
.build();
}
/**
* Create a new AclEntry with scope, type and name (no permission).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
String name) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setName(name)
.build();
}
/**
* Create a new AclEntry with scope and type (no name or permission).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.build();
}
/**
* Asserts the value of the FsPermission bits on the inode of a specific path.
*
* @param fs FileSystem to use for check
* @param pathToCheck Path inode to check
* @param perm short expected permission bits
* @throws IOException thrown if there is an I/O error
*/
public static void assertPermission(FileSystem fs, Path pathToCheck,
short perm) throws IOException {
assertEquals(perm, fs.getFileStatus(pathToCheck).getPermission().toShort());
}
}
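A non-authoritative sketch of how these helper factory methods are typically
combined with the FileSystem ACL API in a test; the path, user name and class
name are invented:
// Sketch only: combining AclTestHelpers.aclEntry(...) with the FileSystem ACL API.
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;

import java.util.Arrays;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AclTestHelpersUsageSketch {
  /** Adds access and default entries for "bruce" and prints the resulting ACL. */
  static void applyAndPrint(FileSystem fs, Path p) throws Exception {
    // Assumes fs is an HDFS instance with dfs.namenode.acls.enabled=true.
    fs.modifyAclEntries(p, Arrays.asList(
        aclEntry(ACCESS, USER, "bruce", ALL),
        aclEntry(DEFAULT, USER, "bruce", ALL)));
    System.out.println(fs.getAclStatus(p).getEntries());
  }
}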
View File
@ -97,6 +97,7 @@ public class OfflineEditsViewerHelper {
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT"); "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
config.setBoolean( config.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = cluster =
new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build(); new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
cluster.waitClusterUp(); cluster.waitClusterUp();
View File
@ -0,0 +1,189 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.collect.Lists;
/**
* Tests that the configuration flag that controls support for ACLs is off by
* default and causes all attempted operations related to ACLs to fail. The
* NameNode can still load ACLs from fsimage or edits.
*/
public class TestAclConfigFlag {
private static final Path PATH = new Path("/path");
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
@Rule
public ExpectedException exception = ExpectedException.none();
@After
public void shutdown() throws Exception {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testModifyAclEntries() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.modifyAclEntries(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testRemoveAclEntries() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeAclEntries(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testRemoveDefaultAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeDefaultAcl(PATH);
}
@Test
public void testRemoveAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeAcl(PATH);
}
@Test
public void testSetAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testGetAclStatus() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.getAclStatus(PATH);
}
@Test
public void testEditLog() throws Exception {
// With ACLs enabled, set an ACL.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
// Restart with ACLs disabled. Expect successful restart.
restart(false, false);
}
@Test
public void testFsImage() throws Exception {
// With ACLs enabled, set an ACL.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
// Save a new checkpoint and restart with ACLs still enabled.
restart(true, true);
// Restart with ACLs disabled. Expect successful restart.
restart(false, false);
}
/**
* We expect an AclException, and we want the exception text to state the
* configuration key that controls ACL support.
*/
private void expectException() {
exception.expect(AclException.class);
exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY);
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @param aclsEnabled if true, ACL support is enabled
* @throws Exception if any step fails
*/
private void initCluster(boolean format, boolean aclsEnabled)
throws Exception {
Configuration conf = new Configuration();
// not explicitly setting to false, should be false by default
if (aclsEnabled) {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
}
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @param aclsEnabled if true, ACL support is enabled
* @throws Exception if restart fails
*/
private void restart(boolean checkpoint, boolean aclsEnabled)
throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false, aclsEnabled);
}
}
View File
@ -0,0 +1,227 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Lists;
public class TestFSImageWithAcl {
private static Configuration conf;
private static MiniDFSCluster cluster;
@BeforeClass
public static void setUp() throws IOException {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
private void testAcl(boolean persistNamespace) throws IOException {
Path p = new Path("/p");
DistributedFileSystem fs = cluster.getFileSystem();
fs.create(p).close();
fs.mkdirs(new Path("/23"));
AclEntry e = new AclEntry.Builder().setName("foo")
.setPermission(READ_EXECUTE).setScope(ACCESS).setType(USER).build();
fs.modifyAclEntries(p, Lists.newArrayList(e));
restart(fs, persistNamespace);
AclStatus s = cluster.getNamesystem().getAclStatus(p.toString());
AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(
new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ) }, returned);
fs.removeAcl(p);
if (persistNamespace) {
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
cluster.waitActive();
s = cluster.getNamesystem().getAclStatus(p.toString());
returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] { }, returned);
fs.modifyAclEntries(p, Lists.newArrayList(e));
s = cluster.getNamesystem().getAclStatus(p.toString());
returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ) }, returned);
}
@Test
public void testPersistAcl() throws IOException {
testAcl(true);
}
@Test
public void testAclEditLog() throws IOException {
testAcl(false);
}
private void doTestDefaultAclNewChildren(boolean persistNamespace)
throws IOException {
Path dirPath = new Path("/dir");
Path filePath = new Path(dirPath, "file1");
Path subdirPath = new Path(dirPath, "subdir1");
DistributedFileSystem fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(dirPath, aclSpec);
fs.create(filePath).close();
fs.mkdirs(subdirPath);
AclEntry[] fileExpected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
AclEntry[] subdirExpected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE));
fs.modifyAclEntries(dirPath, aclSpec);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
fs.removeAcl(dirPath);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)0755);
}
@Test
public void testFsImageDefaultAclNewChildren() throws IOException {
doTestDefaultAclNewChildren(true);
}
@Test
public void testEditLogDefaultAclNewChildren() throws IOException {
doTestDefaultAclNewChildren(false);
}
/**
* Restart the NameNode, optionally saving a new checkpoint.
*
* @param fs DistributedFileSystem used for saving namespace
* @param persistNamespace boolean true to save a new checkpoint
* @throws IOException if restart fails
*/
private void restart(DistributedFileSystem fs, boolean persistNamespace)
throws IOException {
if (persistNamespace) {
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
cluster.waitActive();
}
}
View File
@ -0,0 +1,417 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.Arrays;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Unit tests covering FSPermissionChecker. All tests in this suite have been
* cross-validated against Linux setfacl/getfacl to check for consistency of the
* HDFS implementation.
*/
public class TestFSPermissionChecker {
private static final long PREFERRED_BLOCK_SIZE = 128 * 1024 * 1024;
private static final short REPLICATION = 3;
private static final String SUPERGROUP = "supergroup";
private static final String SUPERUSER = "superuser";
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { "sales" });
private static final UserGroupInformation CLARK =
UserGroupInformation.createUserForTesting("clark", new String[] { "execs" });
private INodeDirectory inodeRoot;
@Before
public void setUp() {
PermissionStatus permStatus = PermissionStatus.createImmutable(SUPERUSER,
SUPERGROUP, FsPermission.createImmutable((short)0755));
inodeRoot = new INodeDirectory(INodeId.ROOT_INODE_ID,
INodeDirectory.ROOT_NAME, permStatus, 0L);
}
@Test
public void testAclOwner() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(BRUCE, "/file1", READ);
assertPermissionGranted(BRUCE, "/file1", WRITE);
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionDenied(BRUCE, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
}
@Test
public void testAclNamedUser() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclNamedUserDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", READ);
}
@Test
public void testAclNamedUserTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", NONE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, MASK, READ_EXECUTE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", ALL);
}
@Test
public void testAclNamedUserMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0620);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, WRITE),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclGroup() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclGroupDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0604);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, MASK, NONE),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclGroupTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, MASK, NONE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", ALL);
}
@Test
public void testAclGroupTraverseDenyOnlyDefaultEntries() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, "sales", NONE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", ALL);
}
@Test
public void testAclGroupMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
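// The mask (READ) caps the effective permission of the group entry
// (READ_WRITE), so the group class can read but not write.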
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclNamedGroup() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclNamedGroupDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "execs", NONE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclNamedGroupTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", NONE),
aclEntry(ACCESS, MASK, READ_EXECUTE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", ALL);
}
@Test
public void testAclNamedGroupMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", READ_WRITE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
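// The named group entry for "sales" grants READ_WRITE, but the mask (READ)
// limits its effective permission to READ.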
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclOther() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0774);
addAcl(inodeFile,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", ALL);
assertPermissionGranted(DIANA, "/file1", ALL);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
private void addAcl(INodeWithAdditionalFields inode, AclEntry... acl)
throws IOException {
AclStorage.updateINodeAcl((INodeWithAdditionalFields)inode,
Arrays.asList(acl), Snapshot.CURRENT_STATE_ID);
}
private void assertPermissionGranted(UserGroupInformation user, String path,
FsAction access) throws IOException {
new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
inodeRoot, false, null, null, access, null, true);
}
private void assertPermissionDenied(UserGroupInformation user, String path,
FsAction access) throws IOException {
try {
new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(path,
inodeRoot, false, null, null, access, null, true);
fail("expected AccessControlException for user + " + user + ", path = " +
path + ", access = " + access);
} catch (AccessControlException e) {
// expected
}
}
private static INodeDirectory createINodeDirectory(INodeDirectory parent,
String name, String owner, String group, short perm) throws IOException {
PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
FsPermission.createImmutable(perm));
INodeDirectory inodeDirectory = new INodeDirectory(
INodeId.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L);
parent.addChild(inodeDirectory);
return inodeDirectory;
}
private static INodeFile createINodeFile(INodeDirectory parent, String name,
String owner, String group, short perm) throws IOException {
PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
FsPermission.createImmutable(perm));
INodeFile inodeFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
PREFERRED_BLOCK_SIZE);
parent.addChild(inodeFile);
return inodeFile;
}
}


@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.BeforeClass;
/**
* Tests NameNode interaction for all ACL modification APIs. This test suite
* also covers interaction of setPermission with inodes that have ACLs.
*/
public class TestNameNodeAcl extends FSAclBaseTest {
@BeforeClass
public static void init() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
assertTrue(fs instanceof DistributedFileSystem);
}
}


@ -91,6 +91,7 @@ public class TestNamenodeRetryCache {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
namesystem = cluster.getNamesystem();


@ -125,6 +125,7 @@ public class TestRetryCacheWithHA {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(DataNodes).build();


@ -0,0 +1,778 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.collect.Lists;
/**
* Tests interaction of ACLs with snapshots.
*/
public class TestAclWithSnapshot {
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { });
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem fsAsBruce, fsAsDiana;
private static DistributedFileSystem hdfs;
private static int pathCount = 0;
private static Path path, snapshotPath;
private static String snapshotName;
@Rule
public ExpectedException exception = ExpectedException.none();
@BeforeClass
public static void init() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
initCluster(true);
}
@AfterClass
public static void shutdown() throws Exception {
IOUtils.cleanup(null, hdfs, fsAsBruce, fsAsDiana);
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void setUp() {
++pathCount;
path = new Path("/p" + pathCount);
snapshotName = "snapshot" + pathCount;
snapshotPath = new Path(path, new Path(".snapshot", snapshotName));
}
@Test
public void testOriginalAclEnforcedForSnapshotRootAfterChange()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, path);
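// Note: the base user/other entries and the mask are folded into the
// permission bits (0750); getAclStatus lists only the remaining entries.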
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, snapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
// Original has changed, but snapshot still has old ACL.
doSnapshotRootChangeAssertions(path, snapshotPath);
restart(false);
doSnapshotRootChangeAssertions(path, snapshotPath);
restart(true);
doSnapshotRootChangeAssertions(path, snapshotPath);
}
private static void doSnapshotRootChangeAssertions(Path path,
Path snapshotPath) throws Exception {
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0550, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, snapshotPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionGranted(fsAsDiana, DIANA, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotContentsAfterChange()
throws Exception {
Path filePath = new Path(path, "file1");
Path subdirPath = new Path(path, "subdir1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
(short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirPath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
// Original has changed, but snapshot still has old ACL.
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(false);
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(true);
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
}
private static void doSnapshotContentsChangeAssertions(Path filePath,
Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath)
throws Exception {
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0570, filePath);
assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
assertFilePermissionGranted(fsAsDiana, DIANA, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0570, subdirPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionGranted(fsAsDiana, DIANA, subdirPath);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotRootAfterRemoval()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, snapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
hdfs.removeAcl(path);
// Original has changed, but snapshot still has old ACL.
doSnapshotRootRemovalAssertions(path, snapshotPath);
restart(false);
doSnapshotRootRemovalAssertions(path, snapshotPath);
restart(true);
doSnapshotRootRemovalAssertions(path, snapshotPath);
}
private static void doSnapshotRootRemovalAssertions(Path path,
Path snapshotPath) throws Exception {
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0700, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0750, snapshotPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval()
throws Exception {
Path filePath = new Path(path, "file1");
Path subdirPath = new Path(path, "subdir1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
(short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirPath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
hdfs.removeAcl(filePath);
hdfs.removeAcl(subdirPath);
// Original has changed, but snapshot still has old ACL.
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(false);
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(true);
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
}
private static void doSnapshotContentsRemovalAssertions(Path filePath,
Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath)
throws Exception {
AclEntry[] expected = new AclEntry[] { };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0500, filePath);
assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0500, subdirPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
}
@Test
public void testModifyReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL));
hdfs.modifyAclEntries(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
hdfs.modifyAclEntries(path, aclSpec);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0770, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionGranted(fsAsDiana, DIANA, path);
}
@Test
public void testRemoveReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL));
hdfs.modifyAclEntries(path, aclSpec);
hdfs.removeAcl(path);
AclEntry[] expected = new AclEntry[] { };
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0700, path);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
}
@Test
public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
hdfs.modifyAclEntries(path, aclSpec);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)0700, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)0700, snapshotPath);
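// Only default entries were added, so the access ACL is unchanged and bruce
// still has no access to the snapshot root.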
assertDirPermissionDenied(fsAsBruce, BRUCE, snapshotPath);
}
@Test
public void testModifyAclEntriesSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
exception.expect(SnapshotAccessControlException.class);
hdfs.modifyAclEntries(snapshotPath, aclSpec);
}
@Test
public void testRemoveAclEntriesSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce"));
exception.expect(SnapshotAccessControlException.class);
hdfs.removeAclEntries(snapshotPath, aclSpec);
}
@Test
public void testRemoveDefaultAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
exception.expect(SnapshotAccessControlException.class);
hdfs.removeDefaultAcl(snapshotPath);
}
@Test
public void testRemoveAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
exception.expect(SnapshotAccessControlException.class);
hdfs.removeAcl(snapshotPath);
}
@Test
public void testSetAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce"));
exception.expect(SnapshotAccessControlException.class);
hdfs.setAcl(snapshotPath, aclSpec);
}
@Test
public void testChangeAclExceedsQuota() throws Exception {
Path filePath = new Path(path, "file1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
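// A tight namespace quota: after the snapshot is taken, a further ACL change
// on the file is expected to fail with NSQuotaExceededException below.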
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE));
hdfs.modifyAclEntries(filePath, aclSpec);
hdfs.createSnapshot(path, snapshotName);
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, filePath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, fileSnapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ));
exception.expect(NSQuotaExceededException.class);
hdfs.modifyAclEntries(filePath, aclSpec);
}
@Test
public void testRemoveAclExceedsQuota() throws Exception {
Path filePath = new Path(path, "file1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE));
hdfs.modifyAclEntries(filePath, aclSpec);
hdfs.createSnapshot(path, snapshotName);
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, filePath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)0660, fileSnapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ));
exception.expect(NSQuotaExceededException.class);
hdfs.removeAcl(filePath);
}
@Test
public void testGetAclStatusDotSnapshotPath() throws Exception {
hdfs.mkdirs(path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
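// The .snapshot pseudo-directory has no ACL of its own, so an empty entry
// list is expected from getAclStatus.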
AclStatus s = hdfs.getAclStatus(new Path(path, ".snapshot"));
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
}
/**
* Asserts that permission is denied to the given fs/user for the given
* directory.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path directory to check
* @throws Exception if there is an unexpected error
*/
private static void assertDirPermissionDenied(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
fail("expected AccessControlException for user " + user + ", path = " +
pathToCheck);
} catch (AccessControlException e) {
// expected
}
}
/**
* Asserts that permission is granted to the given fs/user for the given
* directory.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path directory to check
* @throws Exception if there is an unexpected error
*/
private static void assertDirPermissionGranted(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);
}
}
/**
* Asserts that permission is denied to the given fs/user for the given file.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path file to check
* @throws Exception if there is an unexpected error
*/
private static void assertFilePermissionDenied(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.open(pathToCheck).close();
fail("expected AccessControlException for user " + user + ", path = " +
pathToCheck);
} catch (AccessControlException e) {
// expected
}
}
/**
* Asserts that permission is granted to the given fs/user for the given file.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path file to check
* @throws Exception if there is an unexpected error
*/
private static void assertFilePermissionGranted(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.open(pathToCheck).close();
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);
}
}
/**
* Asserts the value of the FsPermission bits on the inode of the test path.
*
* @param perm short expected permission bits
* @param pathToCheck Path to check
* @throws Exception thrown if there is an unexpected error
*/
private static void assertPermission(short perm, Path pathToCheck)
throws Exception {
AclTestHelpers.assertPermission(hdfs, pathToCheck, perm);
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem
* instances for our test users.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @throws Exception if any step fails
*/
private static void initCluster(boolean format) throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @throws Exception if restart fails
*/
private static void restart(boolean checkpoint) throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false);
}
}


@ -305,7 +305,8 @@ public class TestDiff {
final int i = Diff.search(current, inode.getKey());
Assert.assertTrue(i >= 0);
final INodeDirectory oldinode = (INodeDirectory)current.get(i);
final INodeDirectory newinode = new INodeDirectory(oldinode, false, true);
final INodeDirectory newinode = new INodeDirectory(oldinode, false,
oldinode.getFeatures());
newinode.setModificationTime(oldinode.getModificationTime() + 1);
current.set(i, newinode);


@ -17,11 +17,19 @@
*/
package org.apache.hadoop.hdfs.web;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -32,6 +40,8 @@ import org.junit.Assert;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
import com.google.common.collect.Lists;
public class TestJsonUtil {
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
@ -135,6 +145,47 @@ public class TestJsonUtil {
response.put("ipAddr", "127.0.0.1");
checkDecodeFailure(response);
}
@Test
public void testToAclStatus() {
String jsonString =
"{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
List<AclEntry> aclSpec =
Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "user1", READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
aclStatusBuilder.owner("testuser");
aclStatusBuilder.group("supergroup");
aclStatusBuilder.addEntries(aclSpec);
aclStatusBuilder.stickyBit(false);
Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
JsonUtil.toAclStatus(json));
}
@Test
public void testToJsonFromAclStatus() {
String jsonString =
"{\"AclStatus\":{\"entries\":[\"user:user1:rwx\",\"group::rw-\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
aclStatusBuilder.owner("testuser");
aclStatusBuilder.group("supergroup");
aclStatusBuilder.stickyBit(false);
List<AclEntry> aclSpec =
Lists.newArrayList(aclEntry(ACCESS, USER,"user1", ALL),
aclEntry(ACCESS, GROUP, READ_WRITE));
aclStatusBuilder.addEntries(aclSpec);
Assert.assertEquals(jsonString,
JsonUtil.toJsonString(aclStatusBuilder.build()));
}
private void checkDecodeFailure(Map<String, Object> map) {
try {


@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
/**
* Tests ACL APIs via WebHDFS.
*/
public class TestWebHDFSAcl extends FSAclBaseTest {
@BeforeClass
public static void init() throws Exception {
Configuration conf = WebHdfsTestUtil.createConf();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
assertTrue(fs instanceof WebHdfsFileSystem);
}
/**
* We need to skip this test on WebHDFS, because WebHDFS currently cannot
* resolve symlinks.
*/
@Override
@Test
@Ignore
public void testDefaultAclNewSymlinkIntermediate() {
}
}


@ -21,12 +21,14 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.util.StringUtils;
@ -300,4 +302,48 @@ public class TestParam {
UserParam.setUserPatternDomain(oldDomain);
}
@Test
public void testAclPermissionParam() {
final AclPermissionParam p =
new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx");
List<AclEntry> setAclList =
AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",
true);
Assert.assertEquals(setAclList.toString(), p.getAclPermission(true)
.toString());
new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
try {
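// "rw--" and "rwx-" are not valid rwx triples, so parsing must fail.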
new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
new AclPermissionParam(
"user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx");
try {
new AclPermissionParam("user:r-,group:rwx,other:rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
}


@ -17,6 +17,10 @@
*/
package org.apache.hadoop.security;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@ -24,6 +28,7 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -66,6 +71,7 @@ public class TestPermissionSymlinks {
@BeforeClass
public static void beforeClassSetUp() throws Exception {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.set(FsPermission.UMASK_LABEL, "000");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
@ -101,8 +107,43 @@ public class TestPermissionSymlinks {
@Test(timeout = 5000)
public void testDelete() throws Exception {
// Try to delete where the symlink's parent dir is not writable
fs.setPermission(linkParent, new FsPermission((short) 0555));
doDeleteLinkParentNotWritable();
fs.setPermission(linkParent, new FsPermission((short) 0777));
fs.setPermission(targetParent, new FsPermission((short) 0555));
fs.setPermission(target, new FsPermission((short) 0555));
doDeleteTargetParentAndTargetNotWritable();
}
@Test
public void testAclDelete() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
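// The named user entry (READ_EXECUTE, no WRITE) removes write access on the
// link's parent for the test user, mirroring the 0555 case in testDelete.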
doDeleteLinkParentNotWritable();
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doDeleteTargetParentAndTargetNotWritable();
}
private void doDeleteLinkParentNotWritable() throws Exception {
// Try to delete where the symlink's parent dir is not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -116,11 +157,11 @@ public class TestPermissionSymlinks {
} catch (AccessControlException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
private void doDeleteTargetParentAndTargetNotWritable() throws Exception {
// Try a delete where the symlink parent dir is writable,
// but the target's parent and target are not
fs.setPermission(linkParent, new FsPermission((short) 0777));
fs.setPermission(targetParent, new FsPermission((short) 0555));
fs.setPermission(target, new FsPermission((short) 0555));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
@ -139,6 +180,20 @@ public class TestPermissionSymlinks {
@Test(timeout = 5000)
public void testReadWhenTargetNotReadable() throws Exception {
fs.setPermission(target, new FsPermission((short) 0000));
doReadTargetNotReadable();
}
@Test
public void testAclReadTargetNotReadable() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, user.getUserName(), NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ)));
doReadTargetNotReadable();
}
private void doReadTargetNotReadable() throws Exception {
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -157,8 +212,22 @@ public class TestPermissionSymlinks {
@Test(timeout = 5000)
public void testFileStatus() throws Exception {
// Try to getFileLinkStatus the link when the target is not readable
fs.setPermission(target, new FsPermission((short) 0000));
doGetFileLinkStatusTargetNotReadable();
}
@Test
public void testAclGetFileLinkStatusTargetNotReadable() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, user.getUserName(), NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ)));
doGetFileLinkStatusTargetNotReadable();
}
private void doGetFileLinkStatusTargetNotReadable() throws Exception {
// Try to getFileLinkStatus the link when the target is not readable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
@ -176,9 +245,28 @@ public class TestPermissionSymlinks {
@Test(timeout = 5000)
public void testRenameLinkTargetNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
fs.setPermission(target, new FsPermission((short) 0555));
fs.setPermission(targetParent, new FsPermission((short) 0555));
doRenameLinkTargetNotWritableFC();
}
@Test
public void testAclRenameTargetNotWritableFC() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameLinkTargetNotWritableFC();
}
private void doRenameLinkTargetNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
@ -197,8 +285,22 @@ public class TestPermissionSymlinks {
@Test(timeout = 5000)
public void testRenameSrcNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
fs.setPermission(linkParent, new FsPermission((short) 0555));
doRenameSrcNotWritableFC();
}
@Test
public void testAclRenameSrcNotWritableFC() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameSrcNotWritableFC();
}
private void doRenameSrcNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -220,9 +322,28 @@ public class TestPermissionSymlinks {
@Test(timeout = 5000)
public void testRenameLinkTargetNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
fs.setPermission(target, new FsPermission((short) 0555));
fs.setPermission(targetParent, new FsPermission((short) 0555));
doRenameLinkTargetNotWritableFS();
}
@Test
public void testAclRenameTargetNotWritableFS() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameLinkTargetNotWritableFS();
}
private void doRenameLinkTargetNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
@ -241,8 +362,22 @@ public class TestPermissionSymlinks {
@Test(timeout = 5000)
public void testRenameSrcNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
fs.setPermission(linkParent, new FsPermission((short) 0555));
doRenameSrcNotWritableFS();
}
@Test
public void testAclRenameSrcNotWritableFS() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameSrcNotWritableFS();
}
private void doRenameSrcNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
@ -258,6 +393,4 @@ public class TestPermissionSymlinks {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
}
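The aclEntry(...) calls in the tests above come from a test-side helper; the entries themselves are plain org.apache.hadoop.fs.permission.AclEntry objects. A minimal sketch of the equivalent builder calls, with the user name chosen purely for illustration and not taken from this diff:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclEntrySketch {
  public static void main(String[] args) {
    // Roughly what aclEntry(ACCESS, USER, "someUser", READ_EXECUTE) expands to:
    AclEntry named = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("someUser")                  // illustrative named-user entry
        .setPermission(FsAction.READ_EXECUTE)
        .build();
    // Owner, group-owner and other entries carry no name:
    AclEntry owner = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setPermission(FsAction.ALL)
        .build();
    System.out.println(named + " " + owner);
  }
}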

View File

@ -1310,4 +1310,11 @@
>>>>>>> .merge-right.r1559304
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_ACL</OPCODE>
<DATA>
<TXID>73</TXID>
<SRC>/file_set_acl</SRC>
</DATA>
</RECORD>
</EDITS>

View File

@ -0,0 +1,915 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!-- Normal mode is test. To run just the commands and dump the output
to the log, set it to nocompare -->
<mode>test</mode>
<!-- Comparator types:
ExactComparator
SubstringComparator
RegexpComparator
TokenComparator
-->
<tests>
<!-- Tests for setfacl and getfacl-->
<test>
<description>getfacl: basic permissions</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getfacl: basic permissions for directory</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Add an ACL</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r-- /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Add multiple ACLs at once</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:users:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove an ACL</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,user:charlie:r-x /file1</command>
<command>-fs NAMENODE -setfacl -x user:bob /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!bob).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Add default ACL</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:users:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : try adding default ACL to file</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>setfacl: Invalid ACL: only directories may have a default ACL</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove one default ACL</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl -x default:user:charlie /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:users:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!default:user:charlie).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove all default ACL</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:bob:r--,group:users:r-x /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl -k /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:bob:r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:users:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!default).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove all but base ACLs for a directory</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl -b /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!charlie).*</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!default).*</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!admin).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : Remove all but base ACLs for a file</description>
<test-commands>
<command>-fs NAMENODE -touchz /file1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /file1</command>
<command>-fs NAMENODE -setfacl -b /file1</command>
<command>-fs NAMENODE -getfacl /file1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /file1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /file1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!charlie).*</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!admin).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : check inherit default ACL to file</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -touchz /dir1/file</command>
<command>-fs NAMENODE -getfacl /dir1/file</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1/file</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r--</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r--</expected-output>
</comparator>
<comparator>
<type>RegexpAcrossOutputComparator</type>
<expected-output>.*(?!default).*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl : check inherit default ACL to dir</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:r-x,default:group:admin:rwx /dir1</command>
<command>-fs NAMENODE -mkdir /dir1/dir2</command>
<command>-fs NAMENODE -getfacl /dir1/dir2</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1/dir2</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user:charlie:r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group:admin:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getfacl -R : recursive</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1</command>
<command>-fs NAMENODE -mkdir /dir1/dir2</command>
<command>-fs NAMENODE -setfacl -m user:user1:r-x,group:users:rwx /dir1/dir2</command>
<command>-fs NAMENODE -getfacl -R /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:user1:r-x#LF#group::r-x#LF#group:users:rwx#LF#mask::rwx#LF#other::r-x#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl -R : recursive</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -mkdir /dir1/dir2</command>
<command>-fs NAMENODE -setfacl -R -m user:charlie:r-x,group:admin:rwx /dir1</command>
<command>-fs NAMENODE -getfacl -R /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:r-x#LF#group::r-x#LF#group:admin:rwx#LF#mask::rwx#LF#other::r-x#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl --set : Set full set of ACLs</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl --set user::rw-,group::r--,other::r--,user:user1:r-x,group:users:rw- /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:user1:r-x#LF#group::r--#LF#group:users:rw-#LF#mask::rwx#LF#other::r--#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setfacl -x mask : remove mask entry along with other ACL entries</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:r-x,group:admin:rwx /dir1</command>
<command>-fs NAMENODE -setfacl -x mask::,user:charlie,group:admin /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>ExactComparator</type>
<expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#group::r-x#LF#other::r-x#LF##LF#</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getfacl: only default ACL</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m default:user:charlie:rwx /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user:charlie:rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:group::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rwx</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getfacl: effective permissions</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<command>-fs NAMENODE -setfacl -m user:charlie:rwx,group::-wx,group:sales:rwx,mask::r-x,default:user:charlie:rwx,default:group::r-x,default:group:sales:rwx,default:mask::rw- /dir1</command>
<command>-fs NAMENODE -getfacl /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output># file: /dir1</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># owner: USERNAME</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output># group: supergroup</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>user::rwx</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^user:charlie:rwx\s+#effective:r-x$</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^group::-wx\s+#effective:--x$</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^group:sales:rwx\s+#effective:r-x$</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>mask::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>other::r-x</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:user::rwx</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^default:user:charlie:rwx\s+#effective:rw-$</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^default:group::r-x\s+#effective:r--$</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^default:group:sales:rwx\s+#effective:rw-$</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:mask::rw-</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>default:other::r-x</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>ls: display extended acl marker</description>
<test-commands>
<command>-fs NAMENODE -mkdir -p /dir1/dir2</command>
<command>-fs NAMENODE -setfacl -m user:charlie:rwx,group::-wx,group:sales:rwx,mask::r-x,default:user:charlie:rwx,default:group::r-x,default:group:sales:rwx,default:mask::rw- /dir1/dir2</command>
<command>-fs NAMENODE -ls /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -R /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>TokenComparator</type>
<expected-output>Found 1 items</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^drwxr-xr-x\+( )*-( )*[a-zA-Z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir2</expected-output>
</comparator>
</comparators>
</test>
</tests>
</configuration>
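The getfacl and setfacl commands exercised above map onto the FileSystem ACL methods. A minimal programmatic sketch, assuming a reachable default file system and an illustrative path; it mirrors the "-setfacl -m user:bob:r--" and "-getfacl" cases rather than reproducing any particular test:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

public class GetSetFaclSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/file1");            // illustrative path, mirrors the tests above

    // Equivalent of: hdfs dfs -setfacl -m user:bob:r-- /file1
    fs.modifyAclEntries(file, Arrays.asList(
        new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("bob")
            .setPermission(FsAction.READ)
            .build()));

    // Equivalent of: hdfs dfs -getfacl /file1
    AclStatus status = fs.getAclStatus(file);
    System.out.println("# owner: " + status.getOwner());
    for (AclEntry e : status.getEntries()) {
      System.out.println(e);
    }
  }
}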

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -169,8 +170,21 @@ public class JobHistoryServer extends CompositeService {
}
protected void doSecureLogin(Configuration conf) throws IOException {
InetSocketAddress socAddr = getBindAddress(conf);
SecurityUtil.login(conf, JHAdminConfig.MR_HISTORY_KEYTAB,
JHAdminConfig.MR_HISTORY_PRINCIPAL); JHAdminConfig.MR_HISTORY_PRINCIPAL, socAddr.getHostName());
}
/**
* Retrieve JHS bind address from configuration
*
* @param conf
* @return InetSocketAddress
*/
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_PORT);
}
@Override
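The extra hostname argument above lets the login code expand the _HOST placeholder in the configured Kerberos principal to the JobHistoryServer's bind host. A minimal sketch of that expansion using SecurityUtil.getServerPrincipal; the principal string, service name and realm are assumptions for illustration, not taken from this change:

import java.net.InetAddress;
import org.apache.hadoop.security.SecurityUtil;

public class HostExpansionSketch {
  public static void main(String[] args) throws Exception {
    // _HOST in the configured principal is replaced with the supplied hostname.
    String configured = "jhs/_HOST@EXAMPLE.COM";   // illustrative principal
    String hostname = InetAddress.getLocalHost().getCanonicalHostName();
    String expanded = SecurityUtil.getServerPrincipal(configured, hostname);
    System.out.println(expanded);                  // e.g. jhs/host.example.com@EXAMPLE.COM
  }
}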

View File

@ -22,6 +22,8 @@ Release 2.5.0 - UNRELEASED
NEW FEATURES
IMPROVEMENTS
YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via
jeagles)
OPTIMIZATIONS
@ -200,6 +202,10 @@ Release 2.4.0 - UNRELEASED
be available across RM failover by making use of a remote
configuration-provider. (Xuan Gong via vinodkv)
YARN-1666. Modified RM HA handling of include/exclude node-lists to be
available across RM failover by making use of a remote
configuration-provider. (Xuan Gong via vinodkv)
OPTIMIZATIONS
BUG FIXES
@ -295,6 +301,14 @@ Release 2.4.0 - UNRELEASED
YARN-1724. Race condition in Fair Scheduler when continuous scheduling is
turned on (Sandy Ryza)
YARN-1590. Fixed ResourceManager, web-app proxy and MR JobHistoryServer to
expand _HOST properly in their kerberos principals. (Mohammad Kamrul Islam
via vinodkv)
YARN-1428. Fixed RM to write the final state of RMApp/RMAppAttempt to the
application history store in the transition to the final state. (Contributed
by Zhijie Shen)
Release 2.3.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -19,6 +19,8 @@
package org.apache.hadoop.yarn.conf;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -42,15 +44,16 @@ public abstract class ConfigurationProvider {
}
/**
* Get the configuration and combine with bootstrapConf * Opens an InputStream at the indicated file
* @param bootstrapConf Configuration
* @param name The configuration file name
* @return configuration
* @throws YarnException
* @throws IOException
*/
public abstract Configuration getConfiguration(Configuration bootstrapConf, public abstract InputStream getConfigurationInputStream(
String name) throws YarnException, IOException; Configuration bootstrapConf, String name) throws YarnException,
IOException;
/**
* Derived classes initialize themselves using this method.
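With this change a provider hands back a raw InputStream instead of a fully built Configuration, so callers parse the resource themselves. A minimal caller-side sketch, assuming an already-initialized provider instance; the method and variable names are illustrative, not part of this diff:

import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;

public class ProviderUsageSketch {
  // How a caller might consume the InputStream-returning API introduced above.
  static Configuration load(ConfigurationProvider provider, Configuration bootstrapConf,
      String name) throws Exception {
    Configuration conf = new Configuration(false);
    InputStream in = provider.getConfigurationInputStream(bootstrapConf, name);
    if (in != null) {
      conf.addResource(in);   // Configuration parses the XML resource from the stream
    }
    return conf;
  }
}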

View File

@ -45,22 +45,31 @@ public class YarnConfiguration extends Configuration {
"hadoop-policy.xml"; "hadoop-policy.xml";
@Private @Private
public static final String YARN_SITE_XML_FILE = "yarn-site.xml"; public static final String YARN_SITE_CONFIGURATION_FILE = "yarn-site.xml";
private static final String YARN_DEFAULT_CONFIGURATION_FILE =
"yarn-default.xml";
@Private
public static final String CORE_SITE_CONFIGURATION_FILE = "core-site.xml";
@Private
public static final List<String> RM_CONFIGURATION_FILES =
Collections.unmodifiableList(Arrays.asList(
CS_CONFIGURATION_FILE,
HADOOP_POLICY_CONFIGURATION_FILE,
YARN_SITE_CONFIGURATION_FILE,
CORE_SITE_CONFIGURATION_FILE));
@Evolving
public static final int APPLICATION_MAX_TAGS = 10;
@Evolving
public static final int APPLICATION_MAX_TAG_LENGTH = 100;
private static final String YARN_DEFAULT_XML_FILE = "yarn-default.xml";
static {
Configuration.addDefaultResource(YARN_DEFAULT_XML_FILE); Configuration.addDefaultResource(YARN_DEFAULT_CONFIGURATION_FILE);
Configuration.addDefaultResource(YARN_SITE_XML_FILE); Configuration.addDefaultResource(YARN_SITE_CONFIGURATION_FILE);
}
//Configurations
@ -861,6 +870,9 @@ public class YarnConfiguration extends Configuration {
/** The address for the web proxy.*/
public static final String PROXY_ADDRESS =
PROXY_PREFIX + "address";
public static final int DEFAULT_PROXY_PORT = 9099;
public static final String DEFAULT_PROXY_ADDRESS =
"0.0.0.0:" + DEFAULT_RM_PORT;
/**
* YARN Service Level Authorization
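The new proxy defaults are normally resolved through Configuration.getSocketAddr. A minimal sketch using the constants introduced above; the standalone main is only for illustration:

import java.net.InetSocketAddress;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ProxyAddressSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Resolve the web proxy bind address, falling back to the defaults added above.
    InetSocketAddress addr = conf.getSocketAddr(
        YarnConfiguration.PROXY_ADDRESS,
        YarnConfiguration.DEFAULT_PROXY_ADDRESS,
        YarnConfiguration.DEFAULT_PROXY_PORT);
    System.out.println(addr);
  }
}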

Some files were not shown because too many files have changed in this diff.