HDFS-16144. Revert HDFS-15372 (Files in snapshots no longer see attribute provider permissions). Contributed by Stephen O'Donnell
(cherry picked from commit 4eae284827)
parent 72801be13a
commit d661afc06f
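Background for the hunks below: the NameNode can delegate file attributes and authorization to a pluggable INodeAttributeProvider (configured via dfs.namenode.inode.attributes.provider.class), and this revert changes which form of a snapshot path the provider receives. As orientation only, a minimal sketch of such a provider is shown here; it is not part of this commit and only loosely mirrors the logging removed from the test provider further down.

import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

public class LoggingAttributeProvider extends INodeAttributeProvider {

  @Override
  public void start() {
    // Nothing to initialize in this sketch.
  }

  @Override
  public void stop() {
    // Nothing to shut down in this sketch.
  }

  @Override
  public INodeAttributes getAttributes(String[] pathElements,
      INodeAttributes inode) {
    // pathElements is the path the NameNode presents to the provider; which
    // form it takes for paths under .snapshot is what HDFS-15372 changed and
    // what this commit reverts.
    String fullPath = String.join("/", pathElements);
    if (!fullPath.startsWith("/")) {
      fullPath = "/" + fullPath;
    }
    System.out.println("getAttributes|" + fullPath);
    // Return the stored attributes unchanged, i.e. apply no overrides.
    return inode;
  }
}
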
org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -2052,23 +2052,7 @@ public class FSDirectory implements Closeable {
       // first empty component for the root. however file status
       // related calls are expected to strip out the root component according
       // to TestINodeAttributeProvider.
-      // Due to HDFS-15372 the attribute provider should received the resolved
-      // snapshot path. Ie, rather than seeing /d/.snapshot/sn/data it should
-      // see /d/data. However, for the path /d/.snapshot/sn it should see this
-      // full path. If the current inode is the snapshot name, it always has the
-      // same ID as its parent inode, so we can use that to check if it is the
-      // path which needs handled specially.
-      byte[][] components;
-      INodeDirectory parent = node.getParent();
-      if (iip.isSnapshot()
-          && parent != null && parent.getId() != node.getId()) {
-        // For snapshot paths, we always user node.getPathComponents so the
-        // snapshot path is resolved to the real path, unless the last component
-        // is the snapshot name root directory.
-        components = node.getPathComponents();
-      } else {
-        components = iip.getPathComponents();
-      }
+      byte[][] components = iip.getPathComponents();
       components = Arrays.copyOfRange(components, 1, components.length);
       nodeAttrs = ap.getAttributes(components, nodeAttrs);
     }
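For reference, the kept context lines still strip the leading empty root component before the provider is called. A tiny standalone illustration (plain Java with hypothetical values, not HDFS code):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class RootComponentStripDemo {
  public static void main(String[] args) {
    // Hypothetical components for "/d/data": the first entry is the empty
    // root component referred to in the comment above.
    byte[][] components = {
        new byte[0],
        "d".getBytes(StandardCharsets.UTF_8),
        "data".getBytes(StandardCharsets.UTF_8)
    };
    // Same call shape as the kept context line: drop the root component
    // before the provider sees the path.
    byte[][] stripped = Arrays.copyOfRange(components, 1, components.length);
    for (byte[] component : stripped) {
      System.out.println(new String(component, StandardCharsets.UTF_8));
    }
    // Prints:
    // d
    // data
  }
}
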
org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Stack;
@@ -208,7 +207,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length];
     final byte[][] components = inodesInPath.getPathComponents();
     for (int i = 0; i < inodes.length && inodes[i] != null; i++) {
-      inodeAttrs[i] = getINodeAttrs(inodes[i], snapshotId);
+      inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId);
     }

     String path = inodesInPath.getPath();
@@ -258,7 +257,8 @@ public class FSPermissionChecker implements AccessControlEnforcer {
   void checkPermission(INode inode, int snapshotId, FsAction access)
       throws AccessControlException {
     byte[][] pathComponents = inode.getPathComponents();
-    INodeAttributes nodeAttributes = getINodeAttrs(inode, snapshotId);
+    INodeAttributes nodeAttributes = getINodeAttrs(pathComponents,
+        pathComponents.length - 1, inode, snapshotId);
     try {
       INodeAttributes[] iNodeAttr = {nodeAttributes};
       AccessControlEnforcer enforcer = getAccessControlEnforcer();
@@ -367,31 +367,23 @@ public class FSPermissionChecker implements AccessControlEnforcer {
         authzContext.getSubAccess(), authzContext.isIgnoreEmptyDir());
   }

-  private INodeAttributes getINodeAttrs(INode inode, int snapshotId) {
+  private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx,
+      INode inode, int snapshotId) {
     INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId);
-    /**
-     * This logic is similar to {@link FSDirectory#getAttributes()} and it
-     * ensures that the attribute provider sees snapshot paths resolved to their
-     * original location. This means the attributeProvider can apply permissions
-     * to the snapshot paths in the same was as the live paths. See HDFS-15372.
-     */
     if (getAttributesProvider() != null) {
+      String[] elements = new String[pathIdx + 1];
       /**
-       * If we have an inode representing a path like /d/.snapshot/snap1
-       * then calling inode.getPathComponents returns [null, d, snap1]. If we
-       * call inode.getFullPathName() it will return /d/.snapshot/snap1. For
-       * this special path (snapshot root) the attribute provider should see:
-       *
-       * [null, d, .snapshot/snap1]
-       *
-       * Using IIP.resolveFromRoot, it will take the inode fullPathName and
-       * construct an IIP object that give the correct components as above.
+       * {@link INode#getPathComponents(String)} returns a null component
+       * for the root only path "/". Assign an empty string if so.
        */
-      INodesInPath iip = INodesInPath.resolveFromRoot(inode);
-      byte[][] components = iip.getPathComponents();
-      components = Arrays.copyOfRange(components, 1, components.length);
-      inodeAttrs = getAttributesProvider()
-          .getAttributes(components, inodeAttrs);
+      if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
+        elements[0] = "";
+      } else {
+        for (int i = 0; i < elements.length; i++) {
+          elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
+        }
+      }
+      inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs);
     }
     return inodeAttrs;
   }
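The restored getINodeAttrs above rebuilds a String[] of path elements from the byte[][] components before handing them to the provider. A standalone sketch of that conversion (hypothetical values; a local bytes2String stand-in replaces DFSUtil.bytes2String so it runs outside the NameNode):

import java.nio.charset.StandardCharsets;

public class PathElementsDemo {
  // Stand-in for DFSUtil.bytes2String used by the real code.
  private static String bytes2String(byte[] bytes) {
    return new String(bytes, StandardCharsets.UTF_8);
  }

  // Mirrors the restored branch of getINodeAttrs: a lone null component means
  // the root path "/", which maps to a single empty element.
  static String[] toElements(byte[][] pathByNameArr, int pathIdx) {
    String[] elements = new String[pathIdx + 1];
    if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
      elements[0] = "";
    } else {
      for (int i = 0; i < elements.length; i++) {
        elements[i] = bytes2String(pathByNameArr[i]);
      }
    }
    return elements;
  }

  public static void main(String[] args) {
    // Hypothetical components for "/user/authz"; the first entry is the
    // empty root component.
    byte[][] components = {
        "".getBytes(StandardCharsets.UTF_8),
        "user".getBytes(StandardCharsets.UTF_8),
        "authz".getBytes(StandardCharsets.UTF_8)
    };
    // pathIdx selects how deep into the path the caller is checking.
    System.out.println(String.join("/", toElements(components, 2)));
    // Prints: /user/authz
  }
}
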
@@ -447,7 +439,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     if (!(cList.isEmpty() && ignoreEmptyDir)) {
       //TODO have to figure this out with inodeattribute provider
       INodeAttributes inodeAttr =
-          getINodeAttrs(d, snapshotId);
+          getINodeAttrs(components, pathIdx, d, snapshotId);
       if (!hasPermission(inodeAttr, access)) {
         throw new AccessControlException(
             toAccessControlString(inodeAttr, d.getFullPathName(), access));
@@ -465,7 +457,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     if (inodeAttr.getFsPermission().getStickyBit()) {
       for (INode child : cList) {
         INodeAttributes childInodeAttr =
-            getINodeAttrs(child, snapshotId);
+            getINodeAttrs(components, pathIdx, child, snapshotId);
         if (isStickyBitViolated(inodeAttr, childInodeAttr)) {
           List<byte[]> allComponentList = new ArrayList<>();
           for (int i = 0; i <= pathIdx; ++i) {
org/apache/hadoop/hdfs/server/namenode/INodesInPath.java

@@ -135,27 +135,6 @@ public class INodesInPath {
     return resolve(startingDir, components, false);
   }

-  /**
-   * Retrieves the existing INodes from a path, starting at the root directory.
-   * The root directory is located by following the parent link in the inode
-   * recursively until the final root inode is found.
-   * The inodes returned will depend upon the output of inode.getFullPathName().
-   * For a snapshot path, like /data/.snapshot/snap1, it will be resolved to:
-   * [null, data, .snapshot/snap1]
-   * For a file in the snapshot, as inode.getFullPathName resolves the snapshot
-   * information, the returned inodes for a path like /data/.snapshot/snap1/d1
-   * would be:
-   * [null, data, d1]
-   * @param inode the {@link INode} to be resolved
-   * @return INodesInPath
-   */
-  static INodesInPath resolveFromRoot(INode inode) {
-    INode[] inodes = getINodes(inode);
-    byte[][] paths = INode.getPathComponents(inode.getFullPathName());
-    INodeDirectory rootDir = inodes[0].asDirectory();
-    return resolve(rootDir, paths);
-  }
-
   static INodesInPath resolve(final INodeDirectory startingDir,
       byte[][] components, final boolean isRaw) {
     Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java

@@ -24,6 +24,7 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;

+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;

 import org.apache.hadoop.conf.Configuration;
@@ -33,9 +34,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
@@ -81,7 +82,6 @@ public class TestINodeAttributeProvider {
             ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
       }
       CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access);
-      CALLED.add("checkPermission|" + path);
     }

     @Override
@@ -89,13 +89,13 @@ public class TestINodeAttributeProvider {
         AuthorizationContext authzContext) throws AccessControlException {
       if (authzContext.getAncestorIndex() > 1
           && authzContext.getInodes()[1].getLocalName().equals("user")
-          && authzContext.getInodes()[2].getLocalName().equals("acl")) {
+          && authzContext.getInodes()[2].getLocalName().equals("acl")
+          || runPermissionCheck) {
        this.ace.checkPermissionWithContext(authzContext);
      }
      CALLED.add("checkPermission|" + authzContext.getAncestorAccess()
          + "|" + authzContext.getParentAccess() + "|" + authzContext
          .getAccess());
-      CALLED.add("checkPermission|" + authzContext.getPath());
     }
   }

@@ -112,12 +112,7 @@ public class TestINodeAttributeProvider {
     @Override
     public INodeAttributes getAttributes(String[] pathElements,
         final INodeAttributes inode) {
-      String fullPath = String.join("/", pathElements);
-      if (!fullPath.startsWith("/")) {
-        fullPath = "/" + fullPath;
-      }
       CALLED.add("getAttributes");
-      CALLED.add("getAttributes|"+fullPath);
       final boolean useDefault = useDefault(pathElements);
       final boolean useNullAcl = useNullAclFeature(pathElements);
       return new INodeAttributes() {
@@ -495,109 +490,63 @@ public class TestINodeAttributeProvider {
   }

   @Test
-  // HDFS-15372 - Attribute provider should not see the snapshot path as it
-  // should be resolved into the original path name before it hits the provider.
-  public void testAttrProviderSeesResolvedSnapahotPaths() throws Exception {
+  // See HDFS-16132 where an issue was reported after HDFS-15372. The sequence
+  // of operations here causes that change to break and the test fails with:
+  // org.apache.hadoop.ipc.RemoteException(java.lang.AssertionError):
+  //   Absolute path required, but got 'foo'
+  //   at org.apache.hadoop.hdfs.server.namenode.INode.checkAbsolutePath
+  //   (INode.java:838)
+  //   at org.apache.hadoop.hdfs.server.namenode.INode.getPathComponents
+  //   (INode.java:813)
+  // After reverting HDFS-15372 the test passes, so including this test in the
+  // revert for future reference.
+  public void testAttrProviderWorksCorrectlyOnRenamedSnapshotPaths()
+      throws Exception {
+    runPermissionCheck = true;
     FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
     DistributedFileSystem hdfs = miniDFS.getFileSystem();
-    final Path userPath = new Path("/user");
-    final Path authz = new Path("/user/authz");
-    final Path authzChild = new Path("/user/authz/child2");
-
-    fs.mkdirs(userPath);
-    fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
-    fs.mkdirs(authz);
-    hdfs.allowSnapshot(userPath);
-    fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
-    fs.mkdirs(authzChild);
-    fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
-    fs.createSnapshot(userPath, "snapshot_1");
-    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
-        new String[]{"g1"});
+    final Path parent = new Path("/user");
+    hdfs.mkdirs(parent);
+    fs.setPermission(parent, new FsPermission(HDFS_PERMISSION));
+    final Path sub1 = new Path(parent, "sub1");
+    final Path sub1foo = new Path(sub1, "foo");
+    hdfs.mkdirs(sub1);
+    hdfs.mkdirs(sub1foo);
+    Path f = new Path(sub1foo, "file0");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+    hdfs.allowSnapshot(parent);
+    hdfs.createSnapshot(parent, "s0");
+
+    f = new Path(sub1foo, "file1");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+    f = new Path(sub1foo, "file2");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+
+    final Path sub2 = new Path(parent, "sub2");
+    hdfs.mkdirs(sub2);
+    final Path sub2foo = new Path(sub2, "foo");
+    // mv /parent/sub1/foo to /parent/sub2/foo
+    hdfs.rename(sub1foo, sub2foo);
+
+    hdfs.createSnapshot(parent, "s1");
+    hdfs.createSnapshot(parent, "s2");
+
+    final Path sub3 = new Path(parent, "sub3");
+    hdfs.mkdirs(sub3);
+    // mv /parent/sub2/foo to /parent/sub3/foo
+    hdfs.rename(sub2foo, sub3);
+
+    hdfs.delete(sub3, true);
+    UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting("u1", new String[] {"g1"});
     ugi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
         FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-        final Path snapChild =
-            new Path("/user/.snapshot/snapshot_1/authz/child2");
-        // Run various methods on the path to access the attributes etc.
-        fs.getAclStatus(snapChild);
-        fs.getContentSummary(snapChild);
-        fs.getFileStatus(snapChild);
-        Assert.assertFalse(CALLED.contains("getAttributes|" +
-            snapChild.toString()));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz/child2"));
-        // The snapshot path should be seen by the permission checker, but when
-        // it checks access, the paths will be resolved so the attributeProvider
-        // only sees the resolved path.
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|" + snapChild.toString()));
+        ((DistributedFileSystem)fs).getSnapshotDiffReport(parent, "s1", "s2");
         CALLED.clear();
-        fs.getAclStatus(new Path("/"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot"));
-        // attribute provider never sees the .snapshot path directly.
-        Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("getAttributes|/user/.snapshot/snapshot_1"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED
-            .contains("checkPermission|/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/authz"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
         return null;
       }
     });
-    // Delete the files / folders covered by the snapshot, then re-check they
-    // are all readable correctly.
-    fs.delete(authz, true);
-    ugi.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot"));
-        // attribute provider never sees the .snapshot path directly.
-        Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("getAttributes|/user/.snapshot/snapshot_1"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED
-            .contains("checkPermission|/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
-
-        return null;
-      }
-    });
-
   }
 }
Reference in New Issue