HDFS-6570. add api that enables checking if a user has certain permissions on a file. Contributed by Jitendra Pandey.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614723 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth committed 2014-07-30 17:49:09 +00:00
parent 9b2bfc7c28
commit 535fe14ded
34 changed files with 670 additions and 11 deletions


@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.security.AccessControlException;
@@ -803,6 +804,18 @@ public abstract class AbstractFileSystem {
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#access(Path, FsAction)}
* except that an UnresolvedLinkException may be thrown if a symlink is
* encountered in the path.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
}
/**
* The specification of this method matches that of
* {@link FileContext#getFileLinkStatus(Path)}


@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
@@ -1108,6 +1109,55 @@ public final class FileContext {
}.resolve(this, absF);
}
/**
* Checks if the user can access a path. The mode specifies which access
* checks to perform. If the requested permissions are granted, then the
* method returns normally. If access is denied, then the method throws an
* {@link AccessControlException}.
* <p/>
* The default implementation of this method calls {@link #getFileStatus(Path)}
* and checks the returned permissions against the requested permissions.
* Note that the getFileStatus call will be subject to authorization checks.
* Typically, this requires search (execute) permissions on each directory in
* the path's prefix, but this is implementation-defined. Any file system
* that provides a richer authorization model (such as ACLs) may override the
* default implementation so that it checks against that model instead.
* <p>
* In general, applications should avoid using this method, due to the risk of
* time-of-check/time-of-use race conditions. The permissions on a file may
* change immediately after the access call returns. Most applications should
* prefer running specific file system actions as the desired user represented
* by a {@link UserGroupInformation}.
*
* @param path Path to check
* @param mode type of access to check
* @throws AccessControlException if access is denied
* @throws FileNotFoundException if the path does not exist
* @throws UnsupportedFileSystemException if file system for <code>path</code>
* is not supported
* @throws IOException see specific implementation
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(final Path path, final FsAction mode)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absPath = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(AbstractFileSystem fs, Path p) throws IOException,
UnresolvedLinkException {
fs.access(p, mode);
return null;
}
}.resolve(this, absPath);
}
/**
* Return a file status object that represents the path. If the path
* refers to a symlink then the FileStatus of the symlink is returned.

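The following is a minimal usage sketch for the new FileContext#access API added above; it is not part of the commit, and the configuration, path, and printed messages are illustrative. It exercises the contract stated in the javadoc: a normal return means access is granted, while denial and a missing path surface as exceptions.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class FileContextAccessSketch {
  public static void main(String[] args) throws IOException {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path input = new Path("/data/input"); // illustrative path
    try {
      // Returns normally only if the current user may read the path.
      fc.access(input, FsAction.READ);
      System.out.println("read access granted");
    } catch (AccessControlException e) {
      System.out.println("read access denied");
    } catch (FileNotFoundException e) {
      System.out.println("no such path");
    }
  }
}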

@@ -25,6 +25,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
@@ -50,6 +51,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.io.Text;
@@ -2072,6 +2074,71 @@ public abstract class FileSystem extends Configured implements Closeable {
*/
public abstract FileStatus getFileStatus(Path f) throws IOException;
/**
* Checks if the user can access a path. The mode specifies which access
* checks to perform. If the requested permissions are granted, then the
* method returns normally. If access is denied, then the method throws an
* {@link AccessControlException}.
* <p/>
* The default implementation of this method calls {@link #getFileStatus(Path)}
* and checks the returned permissions against the requested permissions.
* Note that the getFileStatus call will be subject to authorization checks.
* Typically, this requires search (execute) permissions on each directory in
* the path's prefix, but this is implementation-defined. Any file system
* that provides a richer authorization model (such as ACLs) may override the
* default implementation so that it checks against that model instead.
* <p>
* In general, applications should avoid using this method, due to the risk of
* time-of-check/time-of-use race conditions. The permissions on a file may
* change immediately after the access call returns. Most applications should
* prefer running specific file system actions as the desired user represented
* by a {@link UserGroupInformation}.
*
* @param path Path to check
* @param mode type of access to check
* @throws AccessControlException if access is denied
* @throws FileNotFoundException if the path does not exist
* @throws IOException see specific implementation
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, IOException {
checkAccessPermissions(this.getFileStatus(path), mode);
}
/**
* This method provides the default implementation of
* {@link #access(Path, FsAction)}.
*
* @param stat FileStatus to check
* @param mode type of access to check
* @throws IOException for any error
*/
@InterfaceAudience.Private
static void checkAccessPermissions(FileStatus stat, FsAction mode)
throws IOException {
FsPermission perm = stat.getPermission();
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String user = ugi.getShortUserName();
List<String> groups = Arrays.asList(ugi.getGroupNames());
if (user.equals(stat.getOwner())) {
if (perm.getUserAction().implies(mode)) {
return;
}
} else if (groups.contains(stat.getGroup())) {
if (perm.getGroupAction().implies(mode)) {
return;
}
} else {
if (perm.getOtherAction().implies(mode)) {
return;
}
}
throw new AccessControlException(String.format(
"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}
/**
* See {@link FileContext#fixRelativePart}
*/

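A hedged sketch contrasting the advisory FileSystem#access call above with the UserGroupInformation doAs pattern that the javadoc recommends; the user name "alice" and the path are illustrative, not part of the commit.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.UserGroupInformation;

public class AccessVersusDoAsSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    final Path p = new Path("/data/report"); // illustrative path
    UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");

    // Advisory check: throws AccessControlException if "alice" may not read p.
    alice.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws IOException {
        FileSystem.get(conf).access(p, FsAction.READ);
        return null;
      }
    });

    // Preferred per the javadoc: perform the action itself as "alice", so
    // authorization happens atomically with the read and no
    // time-of-check/time-of-use window is left open.
    alice.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws IOException {
        FileSystem.get(conf).open(p).close();
        return null;
      }
    });
  }
}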

@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.security.AccessControlException;
@@ -397,6 +398,12 @@ public class FilterFileSystem extends FileSystem {
return fs.getFileStatus(f);
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, IOException {
fs.access(path, mode);
}
public void createSymlink(final Path target, final Path link,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,


@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.security.AccessControlException;
@@ -119,6 +120,13 @@ public abstract class FilterFs extends AbstractFileSystem {
return myFs.getFileStatus(f);
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
checkPath(path);
myFs.access(path, mode);
}
@Override
public FileStatus getFileLinkStatus(final Path f)
throws IOException, UnresolvedLinkException {


@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Progressable;
/**
@@ -222,6 +224,12 @@ class ChRootedFileSystem extends FilterFileSystem {
return super.getFileStatus(fullPath(f));
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, IOException {
super.access(fullPath(path), mode);
}
@Override
public FsStatus getStatus(Path p) throws IOException {
return super.getStatus(fullPath(p));


@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
@@ -200,6 +202,11 @@ class ChRootedFs extends AbstractFileSystem {
return myFs.getFileStatus(fullPath(f));
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
myFs.access(fullPath(path), mode);
}
@Override
public FileStatus getFileLinkStatus(final Path f)
throws IOException, UnresolvedLinkException {


@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -359,7 +360,14 @@ public class ViewFileSystem extends FileSystem {
return new ViewFsFileStatus(status, this.makeQualified(f));
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.access(res.remainingPath, mode);
}
@Override
public FileStatus[] listStatus(final Path f) throws AccessControlException,
FileNotFoundException, IOException {


@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.local.LocalConfigKeys;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -352,6 +353,14 @@ public class ViewFs extends AbstractFileSystem {
return new ViewFsFileStatus(status, this.makeQualified(f));
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.access(res.remainingPath, mode);
}
@Override
public FileStatus getFileLinkStatus(final Path f)
throws AccessControlException, FileNotFoundException,


@@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
@@ -201,6 +202,8 @@ public class TestHarFileSystem {
public void removeXAttr(Path path, String name) throws IOException;
public AclStatus getAclStatus(Path path) throws IOException;
public void access(Path path, FsAction mode) throws IOException;
}
@Test


@@ -332,6 +332,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6778. The extended attributes javadoc should simply refer to the
user docs. (clamb via wang)
HDFS-6570. add api that enables checking if a user has certain permissions on
a file. (Jitendra Pandey via cnauroth)
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)


@@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
@@ -448,6 +449,11 @@ public class Hdfs extends AbstractFileSystem {
dfs.removeXAttr(getUriPath(path), name);
}
@Override
public void access(Path path, final FsAction mode) throws IOException {
dfs.checkAccess(getUriPath(path), mode);
}
/**
* Renew an existing delegation token.
*


@@ -122,6 +122,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.net.Peer;
@@ -2832,6 +2833,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
public void checkAccess(String src, FsAction mode) throws IOException {
checkOpen();
try {
namenode.checkAccess(src, mode);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)


@@ -59,6 +59,7 @@ import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -1898,4 +1899,23 @@ public class DistributedFileSystem extends FileSystem {
}
}.resolve(this, absF);
}
@Override
public void access(Path path, final FsAction mode) throws IOException {
final Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
dfs.checkAccess(getPathName(p), mode);
return null;
}
@Override
public Void next(final FileSystem fs, final Path p)
throws IOException {
fs.access(p, mode);
return null;
}
}.resolve(this, absF);
}
}


@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -1327,4 +1328,22 @@ public interface ClientProtocol {
*/
@AtMostOnce
public void removeXAttr(String src, XAttr xAttr) throws IOException;
/**
* Checks if the user can access a path. The mode specifies which access
* checks to perform. If the requested permissions are granted, then the
* method returns normally. If access is denied, then the method throws an
* {@link AccessControlException}.
* In general, applications should avoid using this method, due to the risk of
* time-of-check/time-of-use race conditions. The permissions on a file may
* change immediately after the access call returns.
*
* @param path Path to check
* @param mode type of access to check
* @throws AccessControlException if access is denied
* @throws FileNotFoundException if the path does not exist
* @throws IOException see specific implementation
*/
@Idempotent
public void checkAccess(String path, FsAction mode) throws IOException;
}

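A small sketch of how a caller holding a ClientProtocol proxy might consume the new checkAccess RPC; the canWrite helper and its boolean wrapping are hypothetical, and only the exception contract comes from the javadoc above. Because the method is annotated @Idempotent, the RPC layer may safely retry it across NameNode failover.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.security.AccessControlException;

public class CheckAccessSketch {
  // 'namenode' is assumed to be an RPC proxy obtained elsewhere.
  static boolean canWrite(ClientProtocol namenode, String path)
      throws IOException {
    try {
      namenode.checkAccess(path, FsAction.WRITE); // returns normally if granted
      return true;
    } catch (AccessControlException e) {
      return false;                               // access denied
    } catch (FileNotFoundException e) {
      return false;                               // path does not exist
    }
  }
}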

@@ -174,6 +174,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -320,6 +322,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
private static final RemoveXAttrResponseProto
VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();
private static final CheckAccessResponseProto
VOID_CHECKACCESS_RESPONSE = CheckAccessResponseProto.getDefaultInstance();
/**
* Constructor
*
@@ -1338,4 +1343,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
}
return VOID_REMOVEXATTR_RESPONSE;
}
@Override
public CheckAccessResponseProto checkAccess(RpcController controller,
CheckAccessRequestProto req) throws ServiceException {
try {
server.checkAccess(req.getPath(), PBHelper.convert(req.getMode()));
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_CHECKACCESS_RESPONSE;
}
}


@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -144,6 +145,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -1346,4 +1348,15 @@ public class ClientNamenodeProtocolTranslatorPB implements
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void checkAccess(String path, FsAction mode) throws IOException {
CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
.setPath(path).setMode(PBHelper.convert(mode)).build();
try {
rpcProxy.checkAccess(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}


@@ -2107,11 +2107,11 @@ public class PBHelper {
return castEnum(v, XATTR_NAMESPACE_VALUES);
}
-private static FsActionProto convert(FsAction v) {
+public static FsActionProto convert(FsAction v) {
return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
}
-private static FsAction convert(FsActionProto v) {
+public static FsAction convert(FsActionProto v) {
return castEnum(v, FSACTION_VALUES);
}


@@ -8458,6 +8458,29 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
void checkAccess(String src, FsAction mode) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
checkOperation(OperationCategory.READ);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
readLock();
try {
checkOperation(OperationCategory.READ);
src = FSDirectory.resolvePath(src, pathComponents, dir);
if (dir.getINode(src) == null) {
throw new FileNotFoundException("Path not found");
}
if (isPermissionEnabled) {
FSPermissionChecker pc = getPermissionChecker();
checkPathAccess(pc, src, mode);
}
} catch (AccessControlException e) {
logAuditEvent(false, "checkAccess", src);
throw e;
} finally {
readUnlock();
}
}
/**
* Default AuditLogger implementation; used when no access logger is
* defined in the config file. It can also be explicitly listed in the


@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HealthCheckFailedException;
@@ -1443,5 +1444,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
public void removeXAttr(String src, XAttr xAttr) throws IOException {
namesystem.removeXAttr(src, xAttr);
}
@Override
public void checkAccess(String path, FsAction mode) throws IOException {
namesystem.checkAccess(path, mode);
}
}


@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -112,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
import org.apache.hadoop.hdfs.web.resources.FsActionParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server;
@@ -755,10 +757,12 @@ public class NamenodeWebHdfsMethods {
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
final XAttrEncodingParam xattrEncoding,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes,
@QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
final FsActionParam fsAction
) throws IOException, InterruptedException {
return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction);
}
/** Handle HTTP GET request. */
@@ -789,11 +793,13 @@ public class NamenodeWebHdfsMethods {
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
final XAttrEncodingParam xattrEncoding,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes,
@QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
final FsActionParam fsAction
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, offset, length,
renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@@ -801,7 +807,7 @@ public class NamenodeWebHdfsMethods {
try {
return get(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
xattrNames, xattrEncoding, excludeDatanodes, fsAction);
} finally {
reset();
}
@@ -822,7 +828,8 @@ public class NamenodeWebHdfsMethods {
final BufferSizeParam bufferSize,
final List<XAttrNameParam> xattrNames,
final XAttrEncodingParam xattrEncoding,
final ExcludeDatanodesParam excludeDatanodes,
final FsActionParam fsAction
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final NamenodeProtocols np = getRPCServer(namenode);
@@ -919,6 +926,10 @@ public class NamenodeWebHdfsMethods {
final String js = JsonUtil.toJsonString(xAttrs);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case CHECKACCESS: {
np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
return Response.ok().build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}


@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -1356,6 +1357,12 @@ public class WebHdfsFileSystem extends FileSystem
}.run();
}
@Override
public void access(final Path path, final FsAction mode) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS;
new FsPathRunner(op, path, new FsActionParam(mode)).run();
}
@Override
public ContentSummary getContentSummary(final Path p) throws IOException {
statistics.incrementReadOps(1);


@@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.fs.permission.FsAction;
import java.util.regex.Pattern;
/** {@link FsAction} Parameter */
public class FsActionParam extends StringParam {
/** Parameter name. */
public static final String NAME = "fsaction";
/** Default parameter value. */
public static final String DEFAULT = NULL;
private static String FS_ACTION_PATTERN = "[rwx-]{3}";
private static final Domain DOMAIN = new Domain(NAME,
Pattern.compile(FS_ACTION_PATTERN));
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public FsActionParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
}
/**
* Constructor.
* @param value the parameter value.
*/
public FsActionParam(final FsAction value) {
super(DOMAIN, value == null? null: value.SYMBOL);
}
@Override
public String getName() {
return NAME;
}
}

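A quick illustration (not part of the commit) of the wire format the new FsActionParam validates: every FsAction constant carries a three-character SYMBOL such as "rwx" or "r--", so each enum value matches the parameter's "[rwx-]{3}" pattern.

import java.util.regex.Pattern;

import org.apache.hadoop.fs.permission.FsAction;

public class FsActionSymbolSketch {
  public static void main(String[] args) {
    // Same pattern as FsActionParam above.
    Pattern p = Pattern.compile("[rwx-]{3}");
    for (FsAction a : FsAction.values()) {
      // FsAction.SYMBOL is the "rwx"-style form sent as ?fsaction=...
      System.out.println(a + " -> " + a.SYMBOL
          + " valid=" + p.matcher(a.SYMBOL).matches());
    }
  }
}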

@@ -39,7 +39,9 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
GETXATTRS(false, HttpURLConnection.HTTP_OK),
LISTXATTRS(false, HttpURLConnection.HTTP_OK),
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
CHECKACCESS(false, HttpURLConnection.HTTP_OK);
final boolean redirect;
final int expectedHttpResponseCode;


@@ -654,6 +654,14 @@ message DeleteSnapshotRequestProto {
message DeleteSnapshotResponseProto { // void response
}
message CheckAccessRequestProto {
required string path = 1;
required AclEntryProto.FsActionProto mode = 2;
}
message CheckAccessResponseProto { // void response
}
service ClientNamenodeProtocol {
rpc getBlockLocations(GetBlockLocationsRequestProto)
returns(GetBlockLocationsResponseProto);
@@ -783,4 +791,6 @@ service ClientNamenodeProtocol {
returns(ListXAttrsResponseProto);
rpc removeXAttr(RemoveXAttrRequestProto)
returns(RemoveXAttrResponseProto);
rpc checkAccess(CheckAccessRequestProto)
returns(CheckAccessResponseProto);
}


@@ -82,6 +82,9 @@ WebHDFS REST API
* {{{List all XAttrs}<<<LISTXATTRS>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)
* {{{Check access}<<<CHECKACCESS>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access)
* HTTP PUT
* {{{Create and Write to a File}<<<CREATE>>>}}
@@ -927,6 +930,28 @@ Transfer-Encoding: chunked
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
** {Check access}
* Submit a HTTP GET request.
+---------------------------------
curl -i -X GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
&fsaction=<FSACTION>"
+---------------------------------
The client receives a response with zero content length:
+---------------------------------
HTTP/1.1 200 OK
Content-Length: 0
+---------------------------------
[]
See also:
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access
* {Extended Attributes(XAttrs) Operations}
** {Set XAttr}
@@ -2166,6 +2191,25 @@ var tokenProperties =
{{Proxy Users}}
** {Fs Action}
*----------------+-------------------------------------------------------------------+
|| Name | <<<fsaction>>> |
*----------------+-------------------------------------------------------------------+
|| Description | File system operation read/write/execute |
*----------------+-------------------------------------------------------------------+
|| Type | String |
*----------------+-------------------------------------------------------------------+
|| Default Value | null (an invalid value) |
*----------------+-------------------------------------------------------------------+
|| Valid Values | Strings matching regex pattern \"[rwx-]\{3\}\" |
*----------------+-------------------------------------------------------------------+
|| Syntax | \"[rwx-]\{3\}\" |
*----------------+-------------------------------------------------------------------+
See also:
{{{Check access}<<<CHECKACCESS>>>}},
** {Group}
*----------------+-------------------------------------------------------------------+


@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
@@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
@@ -421,6 +425,79 @@ public class TestDFSPermission {
}
}
@Test
public void testAccessOwner() throws IOException, InterruptedException {
FileSystem rootFs = FileSystem.get(conf);
Path p1 = new Path("/p1");
rootFs.mkdirs(p1);
rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(conf);
}
});
fs.setPermission(p1, new FsPermission((short) 0444));
fs.access(p1, FsAction.READ);
try {
fs.access(p1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
Path badPath = new Path("/bad/bad");
try {
fs.access(badPath, FsAction.READ);
fail("The access call should have failed");
} catch (FileNotFoundException e) {
// expected
}
}
@Test
public void testAccessGroupMember() throws IOException, InterruptedException {
FileSystem rootFs = FileSystem.get(conf);
Path p2 = new Path("/p2");
rootFs.mkdirs(p2);
rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
rootFs.setPermission(p2, new FsPermission((short) 0740));
fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(conf);
}
});
fs.access(p2, FsAction.READ);
try {
fs.access(p2, FsAction.EXECUTE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
}
@Test
public void testAccessOthers() throws IOException, InterruptedException {
FileSystem rootFs = FileSystem.get(conf);
Path p3 = new Path("/p3");
rootFs.mkdirs(p3);
rootFs.setPermission(p3, new FsPermission((short) 0774));
fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(conf);
}
});
fs.access(p3, FsAction.READ);
try {
fs.access(p3, FsAction.READ_WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
}
/* Check if namenode performs permission checking correctly
* for the given user for operations mkdir, open, setReplication,
* getFileInfo, isDirectory, exists, getContentLength, list, rename,


@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.List;
import org.apache.commons.logging.Log;
@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -47,6 +49,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
@@ -297,7 +301,8 @@ public class TestSafeMode {
* assert that they are either allowed or fail as expected.
*/
@Test
public void testOperationsWhileInSafeMode() throws IOException,
InterruptedException {
final Path file1 = new Path("/file1");
assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
@@ -407,6 +412,22 @@ public class TestSafeMode {
fail("getAclStatus failed while in SM");
}
// Test access
UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws IOException {
return FileSystem.get(conf);
}
});
myfs.access(file1, FsAction.READ);
try {
myfs.access(file1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
assertFalse("Could not leave SM",
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}


@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -1256,6 +1257,33 @@ public abstract class FSAclBaseTest {
fsAsDiana.getAclStatus(bruceFile);
}
@Test
public void testAccess() throws IOException, InterruptedException {
Path p1 = new Path("/p1");
fs.mkdirs(p1);
fs.setOwner(p1, BRUCE.getShortUserName(), "groupX");
fsAsBruce.setAcl(p1, Lists.newArrayList(
aclEntry(ACCESS, USER, READ),
aclEntry(ACCESS, USER, "bruce", READ),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE)));
fsAsBruce.access(p1, FsAction.READ);
try {
fsAsBruce.access(p1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
Path badPath = new Path("/bad/bad");
try {
fsAsBruce.access(badPath, FsAction.READ);
fail("The access call should have failed");
} catch (FileNotFoundException e) {
// expected
}
}
/**
* Creates a FileSystem for the super-user.
*


@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSClient;
@@ -581,6 +582,7 @@ public class TestINodeFile {
fs.getAclStatus(testFileInodePath);
fs.getXAttrs(testFileInodePath);
fs.listXAttrs(testFileInodePath);
fs.access(testFileInodePath, FsAction.READ_WRITE);
}
// symbolic link related tests


@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -674,6 +675,13 @@ public class TestAclWithSnapshot {
} catch (AccessControlException e) {
// expected
}
try {
fs.access(pathToCheck, FsAction.READ);
fail("The access call should have failed for "+pathToCheck);
} catch (AccessControlException e) {
// expected
}
}
/**
@@ -689,6 +697,7 @@ public class TestAclWithSnapshot {
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
fs.access(pathToCheck, FsAction.READ);
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);


@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
import org.junit.Test;
public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
private static final Configuration conf = new Configuration();
@@ -530,4 +532,35 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
}
}
}
@Test
public void testAccess() throws IOException, InterruptedException {
Path p1 = new Path("/pathX");
try {
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alpha",
new String[]{"beta"});
WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf,
WebHdfsFileSystem.SCHEME);
fs.mkdirs(p1);
fs.setPermission(p1, new FsPermission((short) 0444));
fs.access(p1, FsAction.READ);
try {
fs.access(p1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
Path badPath = new Path("/bad");
try {
fs.access(badPath, FsAction.READ);
fail("The access call should have failed");
} catch (FileNotFoundException e) {
// expected
}
} finally {
fs.delete(p1, true);
}
}
}


@@ -31,6 +31,7 @@ import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.hdfs.web.resources.FsActionParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
@@ -283,6 +285,28 @@ public class TestWebHdfsUrl {
},
fileStatusUrl);
}
@Test(timeout=60000)
public void testCheckAccessUrl() throws IOException {
Configuration conf = new Configuration();
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("test-user");
UserGroupInformation.setLoginUser(ugi);
WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
Path fsPath = new Path("/p1");
URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS,
fsPath, new FsActionParam(FsAction.READ_WRITE));
checkQueryParams(
new String[]{
GetOpParam.Op.CHECKACCESS.toQueryString(),
new UserParam(ugi.getShortUserName()).toString(),
FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL
},
checkAccessUrl);
}
private void checkQueryParams(String[] expected, URL url) {
Arrays.sort(expected);


@@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
@@ -39,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -393,4 +395,37 @@ public class TestPermissionSymlinks {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
@Test
public void testAccess() throws Exception {
fs.setPermission(target, new FsPermission((short) 0002));
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, USER, user.getShortUserName(), WRITE),
aclEntry(ACCESS, OTHER, WRITE)));
FileContext myfc = user.doAs(new PrivilegedExceptionAction<FileContext>() {
@Override
public FileContext run() throws IOException {
return FileContext.getFileContext(conf);
}
});
// Path to targetChild via symlink
myfc.access(link, FsAction.WRITE);
try {
myfc.access(link, FsAction.ALL);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
Path badPath = new Path(link, "bad");
try {
myfc.access(badPath, FsAction.READ);
fail("The access call should have failed");
} catch (FileNotFoundException e) {
// expected
}
}
}