HDFS-12130. Optimizing permission check for getContentSummary. Contributed by Chen Liang
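
getContentSummary previously ran a recursive READ_EXECUTE check over the
entire subtree (via the subAccess argument of checkPermission) before
computing the summary, so the tree was traversed twice. This change removes
the up-front subtree check and instead validates READ_EXECUTE on each
directory while the summary computation itself traverses the tree:
computeContentSummary now declares AccessControlException, and
ContentSummaryComputationContext carries the caller's FSPermissionChecker so
computeDirectoryContentSummary can check each directory as it is visited.
Superusers, and clusters with permissions disabled, skip the check entirely.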
commit f413ee33df
parent a1f12bb543

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * This interface is used by the block manager to expose a
@@ -36,7 +37,8 @@ public interface BlockCollection {
   /**
    * Get content summary.
    */
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+      throws AccessControlException;
 
   /**
    * @return the number of blocks or block groups
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.AccessControlException;
+
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -46,6 +49,8 @@ public class ContentSummaryComputationContext {
 
   public static final String REPLICATED = "Replicated";
   public static final Log LOG = LogFactory.getLog(INode.class);
+
+  private FSPermissionChecker pc;
   /**
    * Constructor
    *
@@ -57,6 +62,12 @@
    */
   public ContentSummaryComputationContext(FSDirectory dir,
       FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
+    this(dir, fsn, limitPerRun, sleepMicroSec, null);
+  }
+
+  public ContentSummaryComputationContext(FSDirectory dir,
+      FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
+      FSPermissionChecker pc) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
@@ -65,6 +76,7 @@
     this.snapshotCounts = new ContentCounts.Builder().build();
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
+    this.pc = pc;
   }
 
   /** Constructor for blocking computation. */
@@ -186,4 +198,12 @@
     }
     return "";
   }
+
+  void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
+      throws AccessControlException {
+    if (dir != null && dir.isPermissionEnabled()
+        && pc != null && !pc.isSuperUser()) {
+      pc.checkPermission(inode, snapshotId, access);
+    }
+  }
 }
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * Quota feature for {@link INodeDirectory}.
@@ -125,7 +126,8 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
   }
 
   ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
-      final ContentSummaryComputationContext summary) {
+      final ContentSummaryComputationContext summary)
+      throws AccessControlException {
     final long original = summary.getCounts().getStoragespace();
     long oldYieldCount = summary.getYieldCount();
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -127,10 +127,8 @@ class FSDirStatAndListingOp {
       FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
-    if (fsd.isPermissionEnabled()) {
-      fsd.checkPermission(pc, iip, false, null, null, null,
-          FsAction.READ_EXECUTE);
-    }
+    // getContentSummaryInt() call will check access (if enabled) when
+    // traversing all sub directories.
     return getContentSummaryInt(fsd, iip);
   }
 
@@ -513,7 +511,8 @@
     // processed. 0 means disabled. I.e. blocking for the entire duration.
     ContentSummaryComputationContext cscc =
         new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-            fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
+            fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
+            fsd.getPermissionChecker());
     ContentSummary cs = targetNode.computeAndConvertContentSummary(
         iip.getPathSnapshotId(), cscc);
     fsd.addYieldCount(cscc.getYieldCount());
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -195,6 +195,38 @@ class FSPermissionChecker implements AccessControlEnforcer {
         ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
   }
 
+  /**
+   * Check permission only for the given inode (not checking the children's
+   * access).
+   *
+   * @param inode the inode to check.
+   * @param snapshotId the snapshot id.
+   * @param access the target access.
+   * @throws AccessControlException
+   */
+  void checkPermission(INode inode, int snapshotId, FsAction access)
+      throws AccessControlException {
+    try {
+      byte[][] localComponents = {inode.getLocalNameBytes()};
+      INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
+      AccessControlEnforcer enforcer = getAccessControlEnforcer();
+      enforcer.checkPermission(
+          fsOwner, supergroup, callerUgi,
+          iNodeAttr, // single inode attr in the array
+          new INode[]{inode}, // single inode in the array
+          localComponents, snapshotId,
+          null, -1, // this will skip checkTraverse() because
+                    // not checking ancestor here
+          false, null, null,
+          access, // the target access to be checked against the inode
+          null, // passing null sub access avoids checking children
+          false);
+    } catch (AccessControlException ace) {
+      throw new AccessControlException(
+          toAccessControlString(inode, inode.getFullPathName(), access));
+    }
+  }
+
   @Override
   public void checkPermission(String fsOwner, String supergroup,
       UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.StringUtils;
 
@@ -418,7 +419,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
 
   /** Compute {@link ContentSummary}. Blocking call */
-  public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
+  public final ContentSummary computeContentSummary(
+      BlockStoragePolicySuite bsps) throws AccessControlException {
     return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
         new ContentSummaryComputationContext(bsps));
   }
@@ -427,7 +429,7 @@
    * Compute {@link ContentSummary}.
    */
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     computeContentSummary(snapshotId, summary);
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -461,7 +463,8 @@
    * @return The same objects as summary.
    */
   public abstract ContentSummaryComputationContext computeContentSummary(
-      int snapshotId, ContentSummaryComputationContext summary);
+      int snapshotId, ContentSummaryComputationContext summary)
+      throws AccessControlException;
 
 
   /**
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
@@ -632,7 +634,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
       final ContentCounts counts = new ContentCounts.Builder().build();
@@ -654,7 +656,10 @@
   }
 
   protected ContentSummaryComputationContext computeDirectoryContentSummary(
-      ContentSummaryComputationContext summary, int snapshotId) {
+      ContentSummaryComputationContext summary, int snapshotId)
+      throws AccessControlException {
+    // throws exception if failing the permission check
+    summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
     ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
     // Explicit traversing is done to enable repositioning after relinquishing
     // and reacquiring locks.
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * An anonymous reference to an inode.
@@ -314,7 +315,7 @@ public abstract class INodeReference extends INode {
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     return referred.computeContentSummary(snapshotId, summary);
   }
 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -234,7 +235,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
 
   @Override
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
+      final ContentCounts counts) throws AccessControlException {
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     super.computeContentSummary4Snapshot(bsps, counts);
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
 
@@ -630,7 +631,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
+      final ContentCounts counts) throws AccessControlException {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary =
         new ContentSummaryComputationContext(bsps);
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import org.apache.hadoop.security.AccessControlException;
 
 /** Snapshot of a sub-tree in the namesystem. */
 @InterfaceAudience.Private
@@ -176,7 +177,8 @@ public class Snapshot implements Comparable<byte[]> {
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(
-      int snapshotId, ContentSummaryComputationContext summary) {
+      int snapshotId, ContentSummaryComputationContext summary)
+      throws AccessControlException {
     return computeDirectoryContentSummary(summary, snapshotId);
   }
 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.security.PrivilegedExceptionAction;
+
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * This class tests getContentSummary with permission settings.
+ */
+public class TestGetContentSummaryWithPermission {
+  protected static final short REPLICATION = 3;
+  protected static final long BLOCKSIZE = 1024;
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem dfs;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
+    cluster.waitActive();
+
+    dfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  /**
+   * Test getContentSummary for the super user, who is always allowed
+   * access regardless of the permissions set on the directories.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetContentSummarySuperUser() throws Exception {
+    final Path foo = new Path("/fooSuper");
+    final Path bar = new Path(foo, "barSuper");
+    final Path baz = new Path(bar, "bazSuper");
+    dfs.mkdirs(bar);
+    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+    ContentSummary summary;
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(foo, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(bar, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(baz, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+  }
+
+  /**
+   * Test getContentSummary for non-super, non-owner users. Such users are
+   * restricted by the permissions of subdirectories: if any subdirectory
+   * does not grant READ_EXECUTE access, AccessControlException is thrown.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetContentSummaryNonSuperUser() throws Exception {
+    final Path foo = new Path("/fooNoneSuper");
+    final Path bar = new Path(foo, "barNoneSuper");
+    final Path baz = new Path(bar, "bazNoneSuper");
+    // run as some random non-superuser, non-owner user.
+    final UserGroupInformation userUgi =
+        UserGroupInformation.createUserForTesting(
+            "randomUser", new String[]{"randomGroup"});
+    dfs.mkdirs(bar);
+    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+    // by default, permission is rwxr-xr-x; as long as READ and EXECUTE are
+    // set, the content summary should be accessible
+    FileStatus fileStatus;
+    fileStatus = dfs.getFileStatus(foo);
+    assertEquals((short)755, fileStatus.getPermission().toOctal());
+    fileStatus = dfs.getFileStatus(bar);
+    assertEquals((short)755, fileStatus.getPermission().toOctal());
+    // the file has no EXECUTE; it is rw-r--r-- by default
+    fileStatus = dfs.getFileStatus(baz);
+    assertEquals((short)644, fileStatus.getPermission().toOctal());
+
+    // by default, can get content summary
+    ContentSummary summary =
+        userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+            () -> cluster.getNameNodeRpc().getContentSummary(
+                foo.toString()));
+    verifySummary(summary, 2, 1, 10);
+
+    // set empty access on root dir, should disallow content summary
+    dfs.setPermission(foo, new FsPermission((short)0));
+    try {
+      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+          () -> cluster.getNameNodeRpc().getContentSummary(
+              foo.toString()));
+      fail("Should have failed due to access control exception.");
+    } catch (AccessControlException e) {
+      assertTrue(e.getMessage().contains("Permission denied"));
+    }
+
+    // restore foo's permission to allow READ_EXECUTE
+    dfs.setPermission(foo,
+        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+    // set empty access on subdir, should disallow content summary from root dir
+    dfs.setPermission(bar, new FsPermission((short)0));
+
+    try {
+      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+          () -> cluster.getNameNodeRpc().getContentSummary(
+              foo.toString()));
+      fail("Should have failed due to access control exception.");
+    } catch (AccessControlException e) {
+      assertTrue(e.getMessage().contains("Permission denied"));
+    }
+
+    // restore the permission of the subdir to READ_EXECUTE, enabling
+    // getContentSummary again for the root
+    dfs.setPermission(bar,
+        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+        () -> cluster.getNameNodeRpc().getContentSummary(
+            foo.toString()));
+    verifySummary(summary, 2, 1, 10);
+
+    // permission of files under the directory does not affect
+    // getContentSummary
+    dfs.setPermission(baz, new FsPermission((short)0));
+    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+        () -> cluster.getNameNodeRpc().getContentSummary(
+            foo.toString()));
+    verifySummary(summary, 2, 1, 10);
+  }
+
+  private void verifySummary(ContentSummary summary, int dirCount,
+      int fileCount, int length) {
+    assertEquals(dirCount, summary.getDirectoryCount());
+    assertEquals(fileCount, summary.getFileCount());
+    assertEquals(length, summary.getLength());
+  }
+
+}
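
For reference, below is a minimal client-side sketch of the behavior this
change introduces, mirroring the test above through the public FileSystem API
rather than the NameNode RPC. It is not part of the commit; the user name and
path are illustrative, and it assumes fs.defaultFS points at a running HDFS
cluster with permissions enabled.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

public class ContentSummaryPermissionExample {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // "someUser" and "/data" are illustrative; any non-superuser works.
    final UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser("someUser");
    try {
      ContentSummary summary = ugi.doAs(
          (PrivilegedExceptionAction<ContentSummary>) () -> {
            FileSystem fs = FileSystem.get(conf);
            // After HDFS-12130, READ_EXECUTE is enforced on each directory
            // while the NameNode traverses the subtree, instead of in a
            // separate full-tree check before the computation starts.
            return fs.getContentSummary(new Path("/data"));
          });
      System.out.println("dirs=" + summary.getDirectoryCount()
          + " files=" + summary.getFileCount()
          + " bytes=" + summary.getLength());
    } catch (AccessControlException ace) {
      // Raised if any directory in the subtree denies READ_EXECUTE
      // to the caller (permissions on plain files are not checked).
      System.err.println("Permission denied: " + ace.getMessage());
    }
  }
}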