From 82ea3f4545c88f2dc106e63afd6fcd616bb120be Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Tue, 16 May 2017 15:45:05 -0500
Subject: [PATCH] HDFS-11641. Reduce cost of audit logging by using FileStatus
 instead of HdfsFileStatus. Contributed by Daryn Sharp.

---
 .../hdfs/server/namenode/FSDirAclOp.java       | 12 ++--
 .../hdfs/server/namenode/FSDirAttrOp.java      | 14 ++---
 .../hdfs/server/namenode/FSDirConcatOp.java    |  4 +-
 .../namenode/FSDirEncryptionZoneOp.java        | 10 ++--
 .../hdfs/server/namenode/FSDirMkdirOp.java     |  4 +-
 .../hdfs/server/namenode/FSDirRenameOp.java    |  8 +--
 .../hdfs/server/namenode/FSDirSymlinkOp.java   |  4 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java  |  8 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java     |  6 +-
 .../hdfs/server/namenode/FSDirectory.java      | 43 +++++++++++++-
 .../hdfs/server/namenode/FSNamesystem.java     | 56 +++++++++++--------
 11 files changed, 107 insertions(+), 62 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 25ca09b9929..a41ff3ece27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -25,7 +26,6 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
 import java.io.IOException;
@@ -33,7 +33,7 @@
 import java.util.List;
 
 class FSDirAclOp {
-  static HdfsFileStatus modifyAclEntries(
+  static FileStatus modifyAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -58,7 +58,7 @@ static HdfsFileStatus modifyAclEntries(
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAclEntries(
+  static FileStatus removeAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -83,7 +83,7 @@ static HdfsFileStatus removeAclEntries(
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -107,7 +107,7 @@ static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -126,7 +126,7 @@ static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setAcl(
+  static FileStatus setAcl(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index c9663dcbefb..9e714aff99a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,6 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -50,7 +50,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 
 public class FSDirAttrOp {
-  static HdfsFileStatus setPermission(
+  static FileStatus setPermission(
       FSDirectory fsd, final String src, FsPermission permission)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -70,7 +70,7 @@ static HdfsFileStatus setPermission(
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setOwner(
+  static FileStatus setOwner(
       FSDirectory fsd, String src, String username, String group)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -100,7 +100,7 @@ static HdfsFileStatus setOwner(
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setTimes(
+  static FileStatus setTimes(
       FSDirectory fsd, String src, long mtime, long atime)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
@@ -153,13 +153,13 @@ static boolean setReplication(
     return isFile;
   }
 
-  static HdfsFileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src) throws IOException {
     return setStoragePolicy(fsd, bm, src,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, "unset");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final String policyName) throws IOException {
     // get the corresponding policy and make sure the policy name is valid
     BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
@@ -171,7 +171,7 @@ static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
     return setStoragePolicy(fsd, bm, src, policy.getId(), "set");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final byte policyId, final String operation)
       throws IOException {
     if (!fsd.isStoragePolicyEnabled()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 3749e8406d7..c51b17838cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -21,9 +21,9 @@
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -48,7 +48,7 @@
  */
 class FSDirConcatOp {
 
-  static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
+  static FileStatus concat(FSDirectory fsd, String target, String[] srcs,
     boolean logRetryCache) throws IOException {
     validatePath(target, srcs);
     assert srcs != null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index d5f6be01b27..22039d15db3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -41,7 +42,6 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -143,10 +143,10 @@ static KeyProvider.Metadata ensureKeyIsInitialized(final FSDirectory fsd,
    *          KeyProvider
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
-   * @return HdfsFileStatus
+   * @return FileStatus
    * @throws IOException
    */
-  static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
+  static FileStatus createEncryptionZone(final FSDirectory fsd,
       final String srcArg, final FSPermissionChecker pc, final String cipher,
       final String keyName, final boolean logRetryCache) throws IOException {
     final CipherSuite suite = CipherSuite.convert(cipher);
@@ -177,7 +177,7 @@ static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
    * @param pc permission checker to check fs permission
    * @return the EZ with file status.
    */
-  static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
+  static Map.Entry<EncryptionZone, FileStatus> getEZForPath(
       final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
       throws IOException {
     final INodesInPath iip;
@@ -192,7 +192,7 @@ static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
     } finally {
       fsd.readUnlock();
     }
-    HdfsFileStatus auditStat = fsd.getAuditFileInfo(iip);
+    FileStatus auditStat = fsd.getAuditFileInfo(iip);
     return new AbstractMap.SimpleImmutableEntry<>(ret, auditStat);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index a7aa293a129..02dd46e2e3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -19,6 +19,7 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -27,7 +28,6 @@
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -39,7 +39,7 @@
 
 class FSDirMkdirOp {
 
-  static HdfsFileStatus mkdirs(FSNamesystem fsn, String src,
+  static FileStatus mkdirs(FSNamesystem fsn, String src,
       PermissionStatus permissions, boolean createParent) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     if(NameNode.stateChangeLog.isDebugEnabled()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 3beb3c09e82..bbbb7247ffd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -19,13 +19,13 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -781,18 +781,18 @@ private static RenameResult createRenameResult(FSDirectory fsd,
       INodesInPath dst, boolean filesDeleted,
       BlocksMapUpdateInfo collectedBlocks) throws IOException {
     boolean success = (dst != null);
-    HdfsFileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
+    FileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
     return new RenameResult(
         success, auditStat, filesDeleted, collectedBlocks);
   }
 
   static class RenameResult {
     final boolean success;
-    final HdfsFileStatus auditStat;
+    final FileStatus auditStat;
     final boolean filesDeleted;
     final BlocksMapUpdateInfo collectedBlocks;
 
-    RenameResult(boolean success, HdfsFileStatus auditStat,
+    RenameResult(boolean success, FileStatus auditStat,
         boolean filesDeleted, BlocksMapUpdateInfo collectedBlocks) {
       this.success = success;
       this.auditStat = auditStat;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index c5a738259a4..8c02269adba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
@@ -33,7 +33,7 @@
 
 class FSDirSymlinkOp {
 
-  static HdfsFileStatus createSymlinkInt(
+  static FileStatus createSymlinkInt(
       FSNamesystem fsn, String target, final String linkArg,
       PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
       throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 7297ec5b2e0..018719a3481 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -21,11 +21,11 @@
 import java.util.Set;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -337,9 +337,9 @@ private static boolean shouldCopyOnTruncate(FSNamesystem fsn, INodeFile file,
    */
  static class TruncateResult {
    private final boolean result;
-    private final HdfsFileStatus stat;
+    private final FileStatus stat;
 
-    public TruncateResult(boolean result, HdfsFileStatus stat) {
+    public TruncateResult(boolean result, FileStatus stat) {
      this.result = result;
      this.stat = stat;
    }
@@ -355,7 +355,7 @@ boolean getResult() {
     /**
      * @return file information.
      */
-    HdfsFileStatus getFileStatus() {
+    FileStatus getFileStatus() {
       return stat;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index f676f366c48..e5243eed5e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -21,13 +21,13 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -59,7 +59,7 @@ class FSDirXAttrOp {
    *          - xAttrs flags
    * @throws IOException
    */
-  static HdfsFileStatus setXAttr(
+  static FileStatus setXAttr(
       FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
       boolean logRetryCache)
       throws IOException {
@@ -153,7 +153,7 @@ static List<XAttr> listXAttrs(
    *          - xAttr to remove
    * @throws IOException
    */
-  static HdfsFileStatus removeXAttr(
+  static FileStatus removeXAttr(
       FSDirectory fsd, String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 46686b6e38f..2ceec51e9a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
@@ -38,6 +39,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -1711,10 +1713,45 @@ void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip)
     }
   }
 
-  HdfsFileStatus getAuditFileInfo(INodesInPath iip)
+  FileStatus getAuditFileInfo(INodesInPath iip)
       throws IOException {
-    return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-        ? FSDirStatAndListingOp.getFileInfo(this, iip, false) : null;
+    if (!namesystem.isAuditEnabled() || !namesystem.isExternalInvocation()) {
+      return null;
+    }
+
+    final INode inode = iip.getLastINode();
+    if (inode == null) {
+      return null;
+    }
+    final int snapshot = iip.getPathSnapshotId();
+
+    Path symlink = null;
+    long size = 0;     // length is zero for directories
+    short replication = 0;
+    long blocksize = 0;
+
+    if (inode.isFile()) {
+      final INodeFile fileNode = inode.asFile();
+      size = fileNode.computeFileSize(snapshot);
+      replication = fileNode.getFileReplication(snapshot);
+      blocksize = fileNode.getPreferredBlockSize();
+    } else if (inode.isSymlink()) {
+      symlink = new Path(
+          DFSUtilClient.bytes2String(inode.asSymlink().getSymlink()));
+    }
+
+    return new FileStatus(
+        size,
+        inode.isDirectory(),
+        replication,
+        blocksize,
+        inode.getModificationTime(snapshot),
+        inode.getAccessTime(snapshot),
+        inode.getFsPermission(snapshot),
+        inode.getUserName(snapshot),
+        inode.getGroupName(snapshot),
+        symlink,
+        new Path(iip.getPath()));
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 88398ca45b1..093d6f743cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -337,25 +337,33 @@ private void logAuditEvent(boolean succeeded, String cmd, String src)
   }
 
   private void logAuditEvent(boolean succeeded, String cmd, String src,
-      String dst, HdfsFileStatus stat) throws IOException {
+      String dst, FileStatus stat) throws IOException {
     if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(succeeded, Server.getRemoteUser(), Server.getRemoteIp(),
           cmd, src, dst, stat);
     }
   }
 
-  private void logAuditEvent(boolean succeeded,
-      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
-      String dst, HdfsFileStatus stat) {
+  private void logAuditEvent(boolean succeeded, String cmd, String src,
+      HdfsFileStatus stat) throws IOException {
+    if (!isAuditEnabled() || !isExternalInvocation()) {
+      return;
+    }
     FileStatus status = null;
     if (stat != null) {
       Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
-      Path path = dst != null ? new Path(dst) : new Path(src);
+      Path path = new Path(src);
       status = new FileStatus(stat.getLen(), stat.isDir(),
           stat.getReplication(), stat.getBlockSize(),
           stat.getModificationTime(), stat.getAccessTime(),
           stat.getPermission(), stat.getOwner(), stat.getGroup(),
           symlink, path);
     }
+    logAuditEvent(succeeded, cmd, src, null, status);
+  }
+
+  private void logAuditEvent(boolean succeeded,
+      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
+      String dst, FileStatus status) {
     final String ugiStr = ugi.toString();
     for (AuditLogger logger : auditLoggers) {
       if (logger instanceof HdfsAuditLogger) {
@@ -1710,7 +1718,7 @@ FsServerDefaults getServerDefaults() throws StandbyException {
    */
   void setPermission(String src, FsPermission permission) throws IOException {
     final String operationName = "setPermission";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1734,7 +1742,7 @@ void setPermission(String src, FsPermission permission) throws IOException {
   void setOwner(String src, String username, String group)
       throws IOException {
     final String operationName = "setOwner";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1862,7 +1870,7 @@ LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
   void concat(String target, String [] srcs, boolean logRetryCache)
       throws IOException {
     final String operationName = "concat";
-    HdfsFileStatus stat = null;
+    FileStatus stat = null;
     boolean success = false;
     writeLock();
     try {
@@ -1887,7 +1895,7 @@ void concat(String target, String [] srcs, boolean logRetryCache)
    */
   void setTimes(String src, long mtime, long atime) throws IOException {
     final String operationName = "setTimes";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1915,7 +1923,7 @@ void createSymlink(String target, String link,
     if (!FileSystem.areSymlinksEnabled()) {
       throw new UnsupportedOperationException("Symlinks not supported");
     }
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2025,7 +2033,7 @@ boolean truncate(String src, long newLength, String clientName,
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
     final String operationName = "setStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2050,7 +2058,7 @@ void setStoragePolicy(String src, String policyName) throws IOException {
    */
   void unsetStoragePolicy(String src) throws IOException {
     final String operationName = "unsetStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2170,7 +2178,7 @@ HdfsFileStatus startFile(String src, PermissionStatus permissions,
       logAuditEvent(false, "create", src);
       throw e;
     }
-    logAuditEvent(true, "create", src, null, status);
+    logAuditEvent(true, "create", src, status);
     return status;
   }
 
@@ -2917,7 +2925,7 @@ boolean isFileClosed(final String src) throws IOException {
   boolean mkdirs(String src, PermissionStatus permissions,
       boolean createParent) throws IOException {
     final String operationName = "mkdirs";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6512,7 +6520,7 @@ BatchedListEntries<CachePoolEntry> listCachePools(String prevKey)
   void modifyAclEntries(final String src, List<AclEntry> aclSpec)
       throws IOException {
     final String operationName = "modifyAclEntries";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6533,7 +6541,7 @@ void removeAclEntries(final String src, List<AclEntry> aclSpec)
       throws IOException {
     final String operationName = "removeAclEntries";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6551,7 +6559,7 @@ void removeAclEntries(final String src, List<AclEntry> aclSpec)
 
   void removeDefaultAcl(final String src) throws IOException {
     final String operationName = "removeDefaultAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6570,7 +6578,7 @@ void removeDefaultAcl(final String src) throws IOException {
 
   void removeAcl(final String src) throws IOException {
     final String operationName = "removeAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6589,7 +6597,7 @@ void removeAcl(final String src) throws IOException {
 
   void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
     final String operationName = "setAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6643,7 +6651,7 @@ void createEncryptionZone(final String src, final String keyName,
     checkSuperuserPrivilege();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
-    final HdfsFileStatus resultingStat;
+    final FileStatus resultingStat;
     writeLock();
     try {
       checkSuperuserPrivilege();
@@ -6674,14 +6682,14 @@ void createEncryptionZone(final String src, final String keyName,
   EncryptionZone getEZForPath(final String srcArg)
       throws AccessControlException, UnresolvedLinkException, IOException {
     final String operationName = "getEZForPath";
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     boolean success = false;
     final FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      Entry<EncryptionZone, HdfsFileStatus> ezForPath = FSDirEncryptionZoneOp
+      Entry<EncryptionZone, FileStatus> ezForPath = FSDirEncryptionZoneOp
           .getEZForPath(dir, srcArg, pc);
       success = true;
       resultingStat = ezForPath.getValue();
@@ -6716,7 +6724,7 @@ void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
       boolean logRetryCache)
       throws IOException {
     final String operationName = "setXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6766,7 +6774,7 @@ List<XAttr> listXAttrs(String src) throws IOException {
   void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     final String operationName = "removeXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
    try {
       checkOperation(OperationCategory.WRITE);
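
Editor's note (not part of the patch): the audit loggers the NameNode dispatches to already consume org.apache.hadoop.fs.FileStatus, which is why every audited call previously paid for an HdfsFileStatus plus a conversion; the patch builds the FileStatus directly instead. The sketch below is illustrative only and assumes the stock AuditLogger plugin interface (initialize(Configuration) and logAuditEvent(boolean, String, InetAddress, String, String, String, FileStatus)) registered via dfs.namenode.audit.loggers; the class name is hypothetical.

```java
import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

/**
 * Minimal custom audit logger showing the FileStatus-based callback that
 * FSNamesystem invokes for each audited operation. Hypothetical example;
 * it would be enabled with dfs.namenode.audit.loggers=<this class name>.
 */
public class StdoutAuditLogger implements AuditLogger {

  @Override
  public void initialize(Configuration conf) {
    // No configuration is needed for this sketch.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst,
      FileStatus status) {
    // Only permission, owner, and group are read from the status, mirroring
    // the fields the default audit log line prints; status may be null when
    // the operation did not resolve to an inode (for example, a failed op).
    String perm = status == null ? "null"
        : status.getPermission() + ":" + status.getOwner()
            + ":" + status.getGroup();
    System.out.println("allowed=" + succeeded + "\tugi=" + userName
        + "\tip=" + addr + "\tcmd=" + cmd + "\tsrc=" + src
        + "\tdst=" + dst + "\tperm=" + perm);
  }
}
```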