HDFS-11641. Reduce cost of audit logging by using FileStatus instead of HdfsFileStatus. Contributed by Daryn Sharp.

Kihwal Lee committed 2017-05-16 15:45:05 -05:00
parent f3015425d0
commit 82ea3f4545
11 changed files with 107 additions and 62 deletions
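
In short: before this patch every FSDir*Op method returned an HdfsFileStatus for auditing, and FSNamesystem#logAuditEvent copied it field-by-field into a FileStatus on every logged call. After it, FSDirectory#getAuditFileInfo builds the plain FileStatus directly from the resolved INode, so the heavier HdfsFileStatus is never materialized just for the audit log. A rough sketch of the per-operation work removed (abridged from the hunks below, not a literal excerpt):

    // Before: two status objects per audited operation.
    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip, false);
    FileStatus status = new FileStatus(stat.getLen(), stat.isDir(),
        stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(),
        stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
        stat.getGroup(), symlink, path);

    // After: one FileStatus, built straight from the INode, and only when
    // an external caller is actually being audited (otherwise null).
    FileStatus status = fsd.getAuditFileInfo(iip);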

FSDirAclOp.java (View File)

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -25,7 +26,6 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
 import java.io.IOException;
@@ -33,7 +33,7 @@ import java.util.Collections;
 import java.util.List;
 
 class FSDirAclOp {
-  static HdfsFileStatus modifyAclEntries(
+  static FileStatus modifyAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -58,7 +58,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAclEntries(
+  static FileStatus removeAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -83,7 +83,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -107,7 +107,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -126,7 +126,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setAcl(
+  static FileStatus setAcl(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;

FSDirAttrOp.java (View File)

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,6 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -50,7 +50,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 
 public class FSDirAttrOp {
-  static HdfsFileStatus setPermission(
+  static FileStatus setPermission(
       FSDirectory fsd, final String src, FsPermission permission)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -70,7 +70,7 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setOwner(
+  static FileStatus setOwner(
       FSDirectory fsd, String src, String username, String group)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -100,7 +100,7 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setTimes(
+  static FileStatus setTimes(
       FSDirectory fsd, String src, long mtime, long atime)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
@@ -153,13 +153,13 @@ public class FSDirAttrOp {
     return isFile;
   }
 
-  static HdfsFileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src) throws IOException {
     return setStoragePolicy(fsd, bm, src,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, "unset");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final String policyName) throws IOException {
     // get the corresponding policy and make sure the policy name is valid
     BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
@@ -171,7 +171,7 @@ public class FSDirAttrOp {
     return setStoragePolicy(fsd, bm, src, policy.getId(), "set");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final byte policyId, final String operation)
       throws IOException {
     if (!fsd.isStoragePolicyEnabled()) {

FSDirConcatOp.java (View File)

@@ -21,9 +21,9 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -48,7 +48,7 @@ import static org.apache.hadoop.util.Time.now;
  */
 class FSDirConcatOp {
-  static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
+  static FileStatus concat(FSDirectory fsd, String target, String[] srcs,
       boolean logRetryCache) throws IOException {
     validatePath(target, srcs);
     assert srcs != null;

FSDirEncryptionZoneOp.java (View File)

@@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -41,7 +42,6 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -143,10 +143,10 @@
    *          KeyProvider
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
-   * @return HdfsFileStatus
+   * @return FileStatus
    * @throws IOException
    */
-  static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
+  static FileStatus createEncryptionZone(final FSDirectory fsd,
       final String srcArg, final FSPermissionChecker pc, final String cipher,
       final String keyName, final boolean logRetryCache) throws IOException {
     final CipherSuite suite = CipherSuite.convert(cipher);
@@ -177,7 +177,7 @@
    * @param pc permission checker to check fs permission
    * @return the EZ with file status.
    */
-  static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
+  static Map.Entry<EncryptionZone, FileStatus> getEZForPath(
      final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
      throws IOException {
    final INodesInPath iip;
@@ -192,7 +192,7 @@
     } finally {
       fsd.readUnlock();
     }
-    HdfsFileStatus auditStat = fsd.getAuditFileInfo(iip);
+    FileStatus auditStat = fsd.getAuditFileInfo(iip);
     return new AbstractMap.SimpleImmutableEntry<>(ret, auditStat);
   }
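
getEZForPath now returns the zone together with the audit FileStatus in one Map.Entry, so FSNamesystem can log the audit event without a second path lookup. The caller side (see the FSNamesystem hunks below) unpacks it roughly like this, where the getKey() line is paraphrased:

    Entry<EncryptionZone, FileStatus> ezForPath =
        FSDirEncryptionZoneOp.getEZForPath(dir, srcArg, pc);
    FileStatus resultingStat = ezForPath.getValue();  // fed to logAuditEvent
    return ezForPath.getKey();                        // the zone for the client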

FSDirMkdirOp.java (View File)

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -27,7 +28,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -39,7 +39,7 @@ import static org.apache.hadoop.util.Time.now;
 
 class FSDirMkdirOp {
 
-  static HdfsFileStatus mkdirs(FSNamesystem fsn, String src,
+  static FileStatus mkdirs(FSNamesystem fsn, String src,
       PermissionStatus permissions, boolean createParent) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     if(NameNode.stateChangeLog.isDebugEnabled()) {

FSDirRenameOp.java (View File)

@@ -19,13 +19,13 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -781,18 +781,18 @@ class FSDirRenameOp {
       INodesInPath dst, boolean filesDeleted,
       BlocksMapUpdateInfo collectedBlocks) throws IOException {
     boolean success = (dst != null);
-    HdfsFileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
+    FileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
     return new RenameResult(
         success, auditStat, filesDeleted, collectedBlocks);
   }
 
   static class RenameResult {
     final boolean success;
-    final HdfsFileStatus auditStat;
+    final FileStatus auditStat;
     final boolean filesDeleted;
     final BlocksMapUpdateInfo collectedBlocks;
 
-    RenameResult(boolean success, HdfsFileStatus auditStat,
+    RenameResult(boolean success, FileStatus auditStat,
         boolean filesDeleted, BlocksMapUpdateInfo collectedBlocks) {
       this.success = success;
       this.auditStat = auditStat;

FSDirSymlinkOp.java (View File)

@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -33,7 +33,7 @@ import static org.apache.hadoop.util.Time.now;
 
 class FSDirSymlinkOp {
 
-  static HdfsFileStatus createSymlinkInt(
+  static FileStatus createSymlinkInt(
       FSNamesystem fsn, String target, final String linkArg,
       PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
       throws IOException {

FSDirTruncateOp.java (View File)

@@ -21,11 +21,11 @@ import java.io.IOException;
 import java.util.Set;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -337,9 +337,9 @@ final class FSDirTruncateOp {
    */
   static class TruncateResult {
     private final boolean result;
-    private final HdfsFileStatus stat;
+    private final FileStatus stat;
 
-    public TruncateResult(boolean result, HdfsFileStatus stat) {
+    public TruncateResult(boolean result, FileStatus stat) {
       this.result = result;
       this.stat = stat;
     }
@@ -355,7 +355,7 @@
     /**
      * @return file information.
      */
-    HdfsFileStatus getFileStatus() {
+    FileStatus getFileStatus() {
       return stat;
     }
   }

FSDirXAttrOp.java (View File)

@@ -21,13 +21,13 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -59,7 +59,7 @@ class FSDirXAttrOp {
    *          - xAttrs flags
    * @throws IOException
    */
-  static HdfsFileStatus setXAttr(
+  static FileStatus setXAttr(
       FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
       boolean logRetryCache)
       throws IOException {
@@ -153,7 +153,7 @@
    *          - xAttr to remove
    * @throws IOException
    */
-  static HdfsFileStatus removeXAttr(
+  static FileStatus removeXAttr(
       FSDirectory fsd, String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);

FSDirectory.java (View File)

@@ -28,6 +28,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
@@ -38,6 +39,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -1711,10 +1713,45 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  HdfsFileStatus getAuditFileInfo(INodesInPath iip)
+  FileStatus getAuditFileInfo(INodesInPath iip)
       throws IOException {
-    return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-        ? FSDirStatAndListingOp.getFileInfo(this, iip, false) : null;
+    if (!namesystem.isAuditEnabled() || !namesystem.isExternalInvocation()) {
+      return null;
+    }
+    final INode inode = iip.getLastINode();
+    if (inode == null) {
+      return null;
+    }
+    final int snapshot = iip.getPathSnapshotId();
+
+    Path symlink = null;
+    long size = 0;     // length is zero for directories
+    short replication = 0;
+    long blocksize = 0;
+
+    if (inode.isFile()) {
+      final INodeFile fileNode = inode.asFile();
+      size = fileNode.computeFileSize(snapshot);
+      replication = fileNode.getFileReplication(snapshot);
+      blocksize = fileNode.getPreferredBlockSize();
+    } else if (inode.isSymlink()) {
+      symlink = new Path(
+          DFSUtilClient.bytes2String(inode.asSymlink().getSymlink()));
+    }
+
+    return new FileStatus(
+        size,
+        inode.isDirectory(),
+        replication,
+        blocksize,
+        inode.getModificationTime(snapshot),
+        inode.getAccessTime(snapshot),
+        inode.getFsPermission(snapshot),
+        inode.getUserName(snapshot),
+        inode.getGroupName(snapshot),
+        symlink,
+        new Path(iip.getPath()));
   }
 
   /**
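
The rewritten getAuditFileInfo is now the one place audit statuses are materialized: it returns null when auditing is off or the call is internal, and otherwise reads length, replication, block size, times, permission, owner, group, and symlink target straight off the (possibly snapshotted) INode. Every FSDir*Op method in this patch then follows the same shape; a sketch with a hypothetical operation name and an assumed resolve helper:

    // "doSomeOp" is hypothetical and the resolvePath call is assumed; only
    // the return pattern is what the patch actually standardizes on.
    static FileStatus doSomeOp(FSDirectory fsd, FSPermissionChecker pc,
        String src) throws IOException {
      INodesInPath iip = fsd.resolvePath(pc, src, DirOp.WRITE);
      // ... apply the mutation to iip under the directory write lock ...
      return fsd.getAuditFileInfo(iip);  // null unless an audited external call
    }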

FSNamesystem.java (View File)

@@ -337,25 +337,33 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   private void logAuditEvent(boolean succeeded, String cmd, String src,
-      String dst, HdfsFileStatus stat) throws IOException {
+      String dst, FileStatus stat) throws IOException {
     if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(succeeded, Server.getRemoteUser(), Server.getRemoteIp(),
           cmd, src, dst, stat);
     }
   }
 
-  private void logAuditEvent(boolean succeeded,
-      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
-      String dst, HdfsFileStatus stat) {
+  private void logAuditEvent(boolean succeeded, String cmd, String src,
+      HdfsFileStatus stat) throws IOException {
+    if (!isAuditEnabled() || !isExternalInvocation()) {
+      return;
+    }
     FileStatus status = null;
     if (stat != null) {
       Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
-      Path path = dst != null ? new Path(dst) : new Path(src);
+      Path path = new Path(src);
       status = new FileStatus(stat.getLen(), stat.isDir(),
           stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(),
           stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
           stat.getGroup(), symlink, path);
     }
+    logAuditEvent(succeeded, cmd, src, null, status);
+  }
+
+  private void logAuditEvent(boolean succeeded,
+      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
+      String dst, FileStatus status) {
     final String ugiStr = ugi.toString();
     for (AuditLogger logger : auditLoggers) {
       if (logger instanceof HdfsAuditLogger) {
@@ -1710,7 +1718,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setPermission(String src, FsPermission permission) throws IOException {
     final String operationName = "setPermission";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1734,7 +1742,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void setOwner(String src, String username, String group)
       throws IOException {
     final String operationName = "setOwner";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1862,7 +1870,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void concat(String target, String [] srcs, boolean logRetryCache)
       throws IOException {
     final String operationName = "concat";
-    HdfsFileStatus stat = null;
+    FileStatus stat = null;
     boolean success = false;
     writeLock();
     try {
@@ -1887,7 +1895,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setTimes(String src, long mtime, long atime) throws IOException {
     final String operationName = "setTimes";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1915,7 +1923,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (!FileSystem.areSymlinksEnabled()) {
       throw new UnsupportedOperationException("Symlinks not supported");
     }
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2025,7 +2033,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
     final String operationName = "setStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2050,7 +2058,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void unsetStoragePolicy(String src) throws IOException {
     final String operationName = "unsetStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2170,7 +2178,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       logAuditEvent(false, "create", src);
       throw e;
     }
-    logAuditEvent(true, "create", src, null, status);
+    logAuditEvent(true, "create", src, status);
     return status;
   }
 
@@ -2917,7 +2925,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   boolean mkdirs(String src, PermissionStatus permissions,
       boolean createParent) throws IOException {
     final String operationName = "mkdirs";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6512,7 +6520,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void modifyAclEntries(final String src, List<AclEntry> aclSpec)
       throws IOException {
     final String operationName = "modifyAclEntries";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6533,7 +6541,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "removeAclEntries";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6551,7 +6559,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void removeDefaultAcl(final String src) throws IOException {
     final String operationName = "removeDefaultAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6570,7 +6578,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void removeAcl(final String src) throws IOException {
     final String operationName = "removeAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6589,7 +6597,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
     final String operationName = "setAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6643,7 +6651,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     checkSuperuserPrivilege();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
-    final HdfsFileStatus resultingStat;
+    final FileStatus resultingStat;
     writeLock();
     try {
       checkSuperuserPrivilege();
@@ -6674,14 +6682,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   EncryptionZone getEZForPath(final String srcArg)
       throws AccessControlException, UnresolvedLinkException, IOException {
     final String operationName = "getEZForPath";
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     boolean success = false;
     final FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      Entry<EncryptionZone, HdfsFileStatus> ezForPath = FSDirEncryptionZoneOp
+      Entry<EncryptionZone, FileStatus> ezForPath = FSDirEncryptionZoneOp
           .getEZForPath(dir, srcArg, pc);
       success = true;
       resultingStat = ezForPath.getValue();
@@ -6716,7 +6724,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       boolean logRetryCache)
       throws IOException {
     final String operationName = "setXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6766,7 +6774,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     final String operationName = "removeXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
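
The net effect in FSNamesystem is a small overload chain: the FileStatus-based logAuditEvent is the common path, while a bridging overload still accepts an HdfsFileStatus for the few operations (such as create) whose RPC response needs one anyway; only that bridge pays the conversion cost, and only when auditing is enabled for an external call. The create() call site above shows the new shape:

    // create() already holds an HdfsFileStatus for the RPC response, so it
    // goes through the bridging overload, which converts lazily:
    logAuditEvent(true, "create", src, status);
    // was: logAuditEvent(true, "create", src, null, status);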