From bff39ca718706526880a5e64a3eee8642d1db9ef Mon Sep 17 00:00:00 2001
From: Tsz-Wo Nicholas Sze
Date: Fri, 14 Jul 2017 14:39:12 -0700
Subject: [PATCH] HDFS-12130. Optimizing permission check for
 getContentSummary. Contributed by Chen Liang

---
 .../blockmanagement/BlockCollection.java           |   4 +-
 .../ContentSummaryComputationContext.java          |  18 ++
 .../namenode/DirectoryWithQuotaFeature.java        |   4 +-
 .../namenode/FSDirStatAndListingOp.java            |   9 +-
 .../server/namenode/FSPermissionChecker.java       |  32 +++
 .../hadoop/hdfs/server/namenode/INode.java         |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java       |   9 +-
 .../hdfs/server/namenode/INodeReference.java       |   3 +-
 .../DirectorySnapshottableFeature.java             |   3 +-
 .../DirectoryWithSnapshotFeature.java              |   3 +-
 .../server/namenode/snapshot/Snapshot.java         |   4 +-
 .../TestGetContentSummaryWithPermission.java       | 222 ++++++++++++++++++
 12 files changed, 304 insertions(+), 16 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 95d99838a2f..2502cf3d42f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * This interface is used by the block manager to expose a
@@ -36,7 +37,8 @@ public interface BlockCollection {
 
   /**
    * Get content summary.
   */
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+      throws AccessControlException;
 
   /**
    * @return the number of blocks
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index b35270d3534..af2ea31070f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.security.AccessControlException;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -36,6 +38,7 @@ public class ContentSummaryComputationContext {
 
   private long sleepMilliSec = 0;
   private int sleepNanoSec = 0;
+  private FSPermissionChecker pc;
 
   /**
    * Constructor
@@ -47,6 +50,12 @@ public class ContentSummaryComputationContext {
    */
   public ContentSummaryComputationContext(FSDirectory dir,
       FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
+    this(dir, fsn, limitPerRun, sleepMicroSec, null);
+  }
+
+  public ContentSummaryComputationContext(FSDirectory dir,
+      FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
+      FSPermissionChecker pc) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
@@ -55,6 +64,7 @@ public class ContentSummaryComputationContext {
     this.snapshotCounts = new ContentCounts.Builder().build();
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
+    this.pc = pc;
   }
 
   /** Constructor for blocking computation. */
@@ -138,4 +148,12 @@ public class ContentSummaryComputationContext {
     return (bsps != null) ? bsps:
         fsn.getBlockManager().getStoragePolicySuite();
   }
+
+  void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
+      throws AccessControlException {
+    if (dir != null && dir.isPermissionEnabled()
+        && pc != null && !pc.isSuperUser()) {
+      pc.checkPermission(inode, snapshotId, access);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 31b45ad758e..0968c65cd0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * Quota feature for {@link INodeDirectory}.
@@ -125,7 +126,8 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
   }
 
   ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
-      final ContentSummaryComputationContext summary) {
+      final ContentSummaryComputationContext summary)
+      throws AccessControlException {
     final long original = summary.getCounts().getStoragespace();
     long oldYieldCount = summary.getYieldCount();
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 2eb11150351..4ada2be132b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -125,10 +125,8 @@ class FSDirStatAndListingOp {
       FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
-    if (fsd.isPermissionEnabled()) {
-      fsd.checkPermission(pc, iip, false, null, null, null,
-          FsAction.READ_EXECUTE);
-    }
+    // The getContentSummaryInt() call will check access (if enabled) while
+    // traversing all subdirectories.
     return getContentSummaryInt(fsd, iip);
   }
 
@@ -503,7 +501,8 @@ class FSDirStatAndListingOp {
     // processed. 0 means disabled. I.e. blocking for the entire duration.
     ContentSummaryComputationContext cscc =
         new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-            fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
+            fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
+            fsd.getPermissionChecker());
     ContentSummary cs = targetNode.computeAndConvertContentSummary(
         iip.getPathSnapshotId(), cscc);
     fsd.addYieldCount(cscc.getYieldCount());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 107d5630532..e3903073e6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -191,6 +191,38 @@ class FSPermissionChecker implements AccessControlEnforcer {
         ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
   }
 
+  /**
+   * Check permission only for the given inode (without checking the access
+   * of its children).
+   *
+   * @param inode the inode to check.
+   * @param snapshotId the snapshot id.
+   * @param access the target access.
+   * @throws AccessControlException
+   */
+  void checkPermission(INode inode, int snapshotId, FsAction access)
+      throws AccessControlException {
+    try {
+      byte[][] localComponents = {inode.getLocalNameBytes()};
+      INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
+      AccessControlEnforcer enforcer = getAccessControlEnforcer();
+      enforcer.checkPermission(
+          fsOwner, supergroup, callerUgi,
+          iNodeAttr, // single inode attr in the array
+          new INode[]{inode}, // single inode in the array
+          localComponents, snapshotId,
+          null, -1, // this will skip checkTraverse() because
+                    // we are not checking the ancestor here
+          false, null, null,
+          access, // the target access to be checked against the inode
+          null, // passing null sub access avoids checking children
+          false);
+    } catch (AccessControlException ace) {
+      throw new AccessControlException(
+          toAccessControlString(inode, inode.getFullPathName(), access));
+    }
+  }
+
   @Override
   public void checkPermission(String fsOwner, String supergroup,
       UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index c6258a173b3..2d6816e58f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.StringUtils;
 
@@ -418,7 +419,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
 
   /** Compute {@link ContentSummary}. Blocking call */
-  public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
+  public final ContentSummary computeContentSummary(
+      BlockStoragePolicySuite bsps) throws AccessControlException {
     return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
         new ContentSummaryComputationContext(bsps));
   }
@@ -427,7 +429,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * Compute {@link ContentSummary}.
    */
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     computeContentSummary(snapshotId, summary);
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -460,7 +462,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   * @return The same objects as summary.
   */
  public abstract ContentSummaryComputationContext computeContentSummary(
-      int snapshotId, ContentSummaryComputationContext summary);
+      int snapshotId, ContentSummaryComputationContext summary)
+      throws AccessControlException;
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index c34544023dd..19fd3e1e9a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
@@ -630,7 +632,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
       final ContentCounts counts = new ContentCounts.Builder().build();
@@ -652,7 +654,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
   }
 
   protected ContentSummaryComputationContext computeDirectoryContentSummary(
-      ContentSummaryComputationContext summary, int snapshotId) {
+      ContentSummaryComputationContext summary, int snapshotId)
+      throws AccessControlException {
+    // throws an AccessControlException if the permission check fails
+    summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
     ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
     // Explicit traversing is done to enable repositioning after relinquishing
     // and reacquiring locks.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 1b852374c5d..db2026da415 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * An anonymous reference to an inode.
@@ -314,7 +315,7 @@ public abstract class INodeReference extends INode {
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) {
+      ContentSummaryComputationContext summary) throws AccessControlException {
     return referred.computeContentSummary(snapshotId, summary);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 39db979ae02..f26b7b4e1fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -221,7 +222,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
 
   @Override
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
+      final ContentCounts counts) throws AccessControlException {
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     super.computeContentSummary4Snapshot(bsps, counts);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 0111b3bfb65..7535879d092 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
 
@@ -630,7 +631,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
+      final ContentCounts counts) throws AccessControlException {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary =
         new ContentSummaryComputationContext(bsps);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index 1d1948a4302..1fa884cbc98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import org.apache.hadoop.security.AccessControlException;
 
 /** Snapshot of a sub-tree in the namesystem. */
 @InterfaceAudience.Private
 public class Snapshot implements Comparable<byte[]> {
@@ -176,7 +177,8 @@ public class Snapshot implements Comparable<byte[]> {
 
     @Override
     public ContentSummaryComputationContext computeContentSummary(
-        int snapshotId, ContentSummaryComputationContext summary) {
+        int snapshotId, ContentSummaryComputationContext summary)
+        throws AccessControlException {
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
new file mode 100644
index 00000000000..2ccb90c13d2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * This class tests getContentSummary under various permission settings.
+ */
+public class TestGetContentSummaryWithPermission {
+  protected static final short REPLICATION = 3;
+  protected static final long BLOCKSIZE = 1024;
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem dfs;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
+    cluster.waitActive();
+
+    dfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  /**
+   * Test getContentSummary for the super user. The super user is always
+   * allowed access, whatever permissions the directories carry.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetContentSummarySuperUser() throws Exception {
+    final Path foo = new Path("/fooSuper");
+    final Path bar = new Path(foo, "barSuper");
+    final Path baz = new Path(bar, "bazSuper");
+    dfs.mkdirs(bar);
+    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+    ContentSummary summary;
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(foo, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(bar, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+
+    dfs.setPermission(baz, new FsPermission((short)0));
+
+    summary = cluster.getNameNodeRpc().getContentSummary(
+        foo.toString());
+    verifySummary(summary, 2, 1, 10);
+  }
+
+  /**
+   * Test getContentSummary for a non-super, non-owner user. Such users are
+   * restricted by the permissions of the subdirectories: if any subdirectory
+   * denies READ_EXECUTE access, an AccessControlException is thrown.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetContentSummaryNonSuperUser() throws Exception {
+    final Path foo = new Path("/fooNoneSuper");
+    final Path bar = new Path(foo, "barNoneSuper");
+    final Path baz = new Path(bar, "bazNoneSuper");
+    // run as some random non-superuser, non-owner user.
+    final UserGroupInformation userUgi =
+        UserGroupInformation.createUserForTesting(
+            "randomUser", new String[]{"randomGroup"});
+    dfs.mkdirs(bar);
+    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+    // by default, permission is rwxr-xr-x; as long as READ and EXECUTE are
+    // set, the content summary should be accessible
+    FileStatus fileStatus;
+    fileStatus = dfs.getFileStatus(foo);
+    assertEquals((short)755, fileStatus.getPermission().toOctal());
+    fileStatus = dfs.getFileStatus(bar);
+    assertEquals((short)755, fileStatus.getPermission().toOctal());
+    // the file has no EXECUTE; it is rw-r--r-- by default
+    fileStatus = dfs.getFileStatus(baz);
+    assertEquals((short)644, fileStatus.getPermission().toOctal());
+
+    // by default, can get content summary
+    ContentSummary summary =
+        userUgi.doAs(new PrivilegedExceptionAction<ContentSummary>() {
+          @Override
+          public ContentSummary run() throws IOException {
+            return cluster.getNameNodeRpc().getContentSummary(
+                foo.toString());
+          }
+        });
+    verifySummary(summary, 2, 1, 10);
+
+    // set empty access on the top directory, should disallow content summary
+    dfs.setPermission(foo, new FsPermission((short)0));
+    try {
+      userUgi.doAs(new PrivilegedExceptionAction<ContentSummary>() {
+        @Override
+        public ContentSummary run() throws IOException {
+          return cluster.getNameNodeRpc().getContentSummary(
+              foo.toString());
+        }
+      });
+      fail("Should've failed due to access control exception.");
+    } catch (AccessControlException e) {
+      assertTrue(e.getMessage().contains("Permission denied"));
+    }
+
+    // restore foo's permission to allow READ_EXECUTE
+    dfs.setPermission(foo,
+        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+    // set empty access on the subdir, should disallow content summary from
+    // the root dir
+    dfs.setPermission(bar, new FsPermission((short)0));
+
+    try {
+      userUgi.doAs(new PrivilegedExceptionAction<ContentSummary>() {
+        @Override
+        public ContentSummary run() throws IOException {
+          return cluster.getNameNodeRpc().getContentSummary(
+              foo.toString());
+        }
+      });
+      fail("Should've failed due to access control exception.");
+    } catch (AccessControlException e) {
+      assertTrue(e.getMessage().contains("Permission denied"));
+    }
+
+    // restore the permission of the subdir to READ_EXECUTE; this enables
+    // getContentSummary again for the root
+    dfs.setPermission(bar,
+        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+    summary = userUgi.doAs(new PrivilegedExceptionAction<ContentSummary>() {
+      @Override
+      public ContentSummary run() throws IOException {
+        return cluster.getNameNodeRpc().getContentSummary(
+            foo.toString());
+      }
+    });
+    verifySummary(summary, 2, 1, 10);
+
+    // permissions of files under the directory do not affect
+    // getContentSummary
+    dfs.setPermission(baz, new FsPermission((short)0));
+    summary = userUgi.doAs(new PrivilegedExceptionAction<ContentSummary>() {
+      @Override
+      public ContentSummary run() throws IOException {
+        return cluster.getNameNodeRpc().getContentSummary(
+            foo.toString());
+      }
+    });
+    verifySummary(summary, 2, 1, 10);
+  }
+
+  private void verifySummary(ContentSummary summary, int dirCount,
+      int fileCount, int length) {
+    assertEquals(dirCount, summary.getDirectoryCount());
+    assertEquals(fileCount, summary.getFileCount());
+    assertEquals(length, summary.getLength());
+  }
+
+}
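
Below is a minimal, hypothetical sketch (not part of the patch) of the client-visible behavior after this change, assuming an HDFS cluster with dfs.permissions.enabled=true reachable at the default FileSystem URI; the paths /top and /top/sub and the user name "someUser" are made up for illustration. The point it demonstrates: the READ_EXECUTE check now happens per directory during traversal (see computeDirectoryContentSummary above), rather than once up front at the queried root.

// Hypothetical standalone sketch; assumes a running HDFS cluster with
// permissions enabled, with main() executed as a superuser.
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

public class ContentSummaryPermissionSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical tree: /top is world-readable, /top/sub denies everyone.
    final Path top = new Path("/top");
    fs.mkdirs(new Path(top, "sub"));
    fs.setPermission(new Path(top, "sub"), new FsPermission((short) 0));

    UserGroupInformation user =
        UserGroupInformation.createRemoteUser("someUser");
    try {
      ContentSummary cs = user.doAs(
          new PrivilegedExceptionAction<ContentSummary>() {
            @Override
            public ContentSummary run() throws Exception {
              // With this patch, READ_EXECUTE is checked on each directory
              // as the summary computation traverses it, so this call is
              // denied at /top/sub rather than at the queried root /top.
              return FileSystem.get(conf).getContentSummary(top);
            }
          });
      System.out.println("summary: " + cs);
    } catch (AccessControlException ace) {
      System.out.println("denied during traversal: " + ace.getMessage());
    }
  }
}

As in the test above, only directories are subject to the traversal check: a file with mode 000 under a readable directory does not block the summary.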