From 1ef8d7a638df5150b8426755af034839d5f88ca2 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Mon, 15 Aug 2016 17:01:40 -0500
Subject: [PATCH] HDFS-10744. Internally optimize path component resolution.
 Contributed by Daryn Sharp.

(cherry picked from commit 03dea65e0b17ca2f9460bb6110f6ab3a321b8bf2)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
---
 .../hdfs/server/namenode/FSDirAclOp.java      |  18 ++--
 .../hdfs/server/namenode/FSDirAppendOp.java   |   4 +-
 .../hdfs/server/namenode/FSDirAttrOp.java     |  22 ++--
 .../hdfs/server/namenode/FSDirDeleteOp.java   |   5 +-
 .../namenode/FSDirEncryptionZoneOp.java       |   8 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java    |   3 +-
 .../hdfs/server/namenode/FSDirRenameOp.java   |  12 +--
 .../namenode/FSDirStatAndListingOp.java       |  27 ++---
 .../hdfs/server/namenode/FSDirSymlinkOp.java  |   3 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java |   4 +-
 .../server/namenode/FSDirWriteFileOp.java     |  15 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java    |  13 +--
 .../hdfs/server/namenode/FSDirectory.java     | 100 ++++++++----
 .../hdfs/server/namenode/FSNamesystem.java    |  16 +--
 .../hdfs/server/namenode/TestINodeFile.java   |  51 ++++-----
 15 files changed, 115 insertions(+), 186 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 0c572b54cb7..296bed284dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -39,8 +39,7 @@ static HdfsFileStatus modifyAclEntries(
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -65,8 +64,7 @@ static HdfsFileStatus removeAclEntries(
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -90,8 +88,7 @@ static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -115,8 +112,7 @@ static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -135,9 +131,8 @@ static HdfsFileStatus setAcl(
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -155,8 +150,7 @@ static AclStatus getAclStatus(
       FSDirectory fsd, String src) throws IOException {
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     String srcs = FSDirectory.normalizePath(src);
     fsd.readLock();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index e5b139216e9..f96cf69a58e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -82,14 +82,12 @@ static LastBlockWithStatus appendFile(final FSNamesystem fsn,
       final boolean logRetryCache) throws IOException {
     assert fsn.hasWriteLock();
 
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final LocatedBlock lb;
     final FSDirectory fsd = fsn.getFSDirectory();
     final String src;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final INodesInPath iip = fsd.getINodesInPath4Write(src);
       // Verify that the destination does not exist as a directory already
       final INode inode = iip.getLastINode();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 7575f5ec339..b6b8dae594a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -57,11 +57,10 @@ static HdfsFileStatus setPermission(
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       unprotectedSetPermission(fsd, src, permission);
@@ -79,11 +78,10 @@ static HdfsFileStatus setOwner(
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       if (!pc.isSuperUser()) {
@@ -106,12 +104,11 @@ static HdfsFileStatus setTimes(
       FSDirectory fsd, String src, long mtime, long atime) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
 
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       // Write access is required to set access and modification times
       if (fsd.isPermissionEnabled()) {
@@ -139,10 +136,9 @@ static boolean setReplication(
     bm.verifyReplication(src, replication, null);
     final boolean isFile;
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath4Write(src);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
@@ -187,11 +183,10 @@ static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
           DFS_STORAGE_POLICY_ENABLED_KEY));
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, fsd);
+      src = FSDirectory.resolvePath(src, fsd);
       iip = fsd.getINodesInPath4Write(src);
 
       if (fsd.isPermissionEnabled()) {
@@ -214,11 +209,9 @@ static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
   static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
       String path) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(path);
     fsd.readLock();
     try {
-      path = fsd.resolvePath(pc, path, pathComponents);
+      path = fsd.resolvePath(pc, path);
       final INodesInPath iip = fsd.getINodesInPath(path, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.READ);
@@ -237,10 +230,9 @@ static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
   static long getPreferredBlockSize(FSDirectory fsd, String src)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath(src, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkTraverse(pc, iip);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 6db2ce811b0..58f7a614ca5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -97,9 +97,8 @@ static BlocksMapUpdateInfo delete(
       throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
 
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
     if (!recursive && fsd.isNonEmptyDirectory(iip)) {
       throw new PathIsNotEmptyDirectoryException(src + " is non empty");
@@ -109,7 +108,7 @@ static BlocksMapUpdateInfo delete(
           FsAction.ALL, true);
     }
     if (recursive && fsd.isNonEmptyDirectory(iip)) {
-      checkProtectedDescendants(fsd, fsd.normalizePath(src));
+      checkProtectedDescendants(fsd, src);
     }
 
     return deleteInternal(fsn, src, iip, logRetryCache);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index f3e4d460a55..ba9e9d1cd9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -146,8 +146,6 @@ static KeyProvider.Metadata ensureKeyIsInitialized(final FSDirectory fsd,
   static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
       final String srcArg, final FSPermissionChecker pc, final String cipher,
       final String keyName, final boolean logRetryCache) throws IOException {
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final CipherSuite suite = CipherSuite.convert(cipher);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     final String src;
@@ -157,7 +155,7 @@ static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
 
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final XAttr ezXAttr = fsd.ezManager.createEncryptionZone(src, suite,
           version, keyName);
       xAttrs.add(ezXAttr);
@@ -180,14 +178,12 @@ static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
   static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
       final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
       throws IOException {
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String src;
     final INodesInPath iip;
     final EncryptionZone ret;
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       iip = fsd.getINodesInPath(src, true);
       if (iip.getLastINode() == null) {
         throw new FileNotFoundException("Path not found: " + iip.getPath());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index c74facaf6a7..11414229920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -50,10 +50,9 @@ static HdfsFileStatus mkdirs(FSNamesystem fsn, String src,
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       INodesInPath iip = fsd.getINodesInPath4Write(src);
       if (fsd.isPermissionEnabled()) {
         fsd.checkTraverse(pc, iip);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c64dfea710c..e9514ba08ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -64,11 +64,9 @@ static RenameOldResult renameToInt(
     }
 
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     HdfsFileStatus resultingStat = null;
-    src = fsd.resolvePath(pc, src, srcComponents);
-    dst = fsd.resolvePath(pc, dst, dstComponents);
+    src = fsd.resolvePath(pc, src);
+    dst = fsd.resolvePath(pc, dst);
     @SuppressWarnings("deprecation")
     final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
     if (status) {
@@ -239,11 +237,9 @@ static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(
     }
 
     final FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    src = fsd.resolvePath(pc, src, srcComponents);
-    dst = fsd.resolvePath(pc, dst, dstComponents);
+    src = fsd.resolvePath(pc, src);
+    dst = fsd.resolvePath(pc, dst);
     renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
     INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
     HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 8c70e8ca388..9df5e878d1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -50,24 +50,20 @@ class FSDirStatAndListingOp {
   static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
       byte[] startAfter, boolean needLocation) throws IOException {
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String startAfterString = DFSUtil.bytes2String(startAfter);
     String src = null;
 
     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
     } else {
-      src = FSDirectory.resolvePath(srcArg, pathComponents, fsd);
+      src = FSDirectory.resolvePath(srcArg, fsd);
     }
 
     // Get file name when startAfter is an INodePath
     if (FSDirectory.isReservedName(startAfterString)) {
-      byte[][] startAfterComponents = FSDirectory
-          .getPathComponentsForReservedPath(startAfterString);
       try {
-        String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
+        String tmp = FSDirectory.resolvePath(startAfterString, fsd);
         byte[][] regularPath = INode.getPathComponents(tmp);
         startAfter = regularPath[regularPath.length - 1];
       } catch (IOException e) {
@@ -108,14 +104,13 @@ static HdfsFileStatus getFileInfo(
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException("Invalid file name: " + src);
     }
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
       fsd.checkPermission(pc, iip, false, null, null, null, null, false);
     } else {
-      src = FSDirectory.resolvePath(src, pathComponents, fsd);
+      src = FSDirectory.resolvePath(srcArg, fsd);
     }
     return getFileInfo(fsd, src, FSDirectory.isReservedRawName(srcArg),
         resolveLink);
@@ -126,8 +121,7 @@ static HdfsFileStatus getFileInfo(
    */
   static boolean isFileClosed(FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       fsd.checkTraverse(pc, iip);
@@ -137,9 +131,8 @@ static boolean isFileClosed(FSDirectory fsd, String src) throws IOException {
 
   static ContentSummary getContentSummary(
       FSDirectory fsd, String src) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, null, null,
@@ -162,11 +155,10 @@ static GetBlockLocationsResult getBlockLocations(
         "Negative length is not supported. File: " + src);
     CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     boolean isReservedName = FSDirectory.isReservedRawName(src);
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath(src, true);
       final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
       if (fsd.isPermissionEnabled()) {
@@ -618,12 +610,11 @@ private static ContentSummary getContentSummaryInt(FSDirectory fsd,
 
   static QuotaUsage getQuotaUsage(
       FSDirectory fsd, String src) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip;
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath(src, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPermission(pc, iip, false, null, null, null,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 44a171aecf0..e78c7b545ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -53,11 +53,10 @@ static HdfsFileStatus createSymlinkInt(
     }
 
     FSPermissionChecker pc = fsn.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      link = fsd.resolvePath(pc, link, pathComponents);
+      link = fsd.resolvePath(pc, link);
       iip = fsd.getINodesInPath4Write(link, false);
       if (!createParent) {
         fsd.verifyParentDir(iip, link);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index ec785b1f5c4..15b284c8dba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -71,15 +71,13 @@ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg,
     assert fsn.hasWriteLock();
 
     FSDirectory fsd = fsn.getFSDirectory();
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String src;
     final INodesInPath iip;
     final boolean onBlockBoundary;
     Block truncateBlock = null;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       iip = fsd.getINodesInPath4Write(src, true);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index fea119a8b58..3e0cad59885 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -116,8 +116,7 @@ static void persistBlocks(
   static void abandonBlock(
       FSDirectory fsd, FSPermissionChecker pc, ExtendedBlock b, long fileId,
       String src, String holder) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
 
     final INode inode;
     final INodesInPath iip;
@@ -178,8 +177,7 @@ static ValidateAddBlockResult validateAddBlock(
     final byte storagePolicyID;
     String clientMachine;
 
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsn.dir.resolvePath(pc, src, pathComponents);
+    src = fsn.dir.resolvePath(pc, src);
     FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
         previous, onRetryBlock);
     if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
@@ -341,8 +339,7 @@ static HdfsFileStatus startFile(
     boolean isRawPath = FSDirectory.isReservedRawName(src);
     FSDirectory fsd = fsn.getFSDirectory();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip = fsd.getINodesInPath4Write(src);
 
     // Verify that the destination does not exist as a directory already.
@@ -451,9 +448,8 @@ static EncryptionKeyInfo getEncryptionKeyInfo(FSNamesystem fsn,
       FSPermissionChecker pc, String src,
       CryptoProtocolVersion[] supportedVersions)
       throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSDirectory fsd = fsn.getFSDirectory();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip = fsd.getINodesInPath4Write(src);
     // Nothing to do if the path is not within an EZ
     final EncryptionZone zone = FSDirEncryptionZoneOp.getEZForPath(fsd, iip);
@@ -696,8 +692,7 @@ static boolean completeFile(FSNamesystem fsn, FSPermissionChecker pc,
           src + " for " + holder);
     }
     checkBlock(fsn, last);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsn.dir.resolvePath(pc, src, pathComponents);
+    src = fsn.dir.resolvePath(pc, src);
     boolean success = completeFileInternal(fsn, src, holder,
         ExtendedBlock.getLocalBlock(last), fileId);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 92686c5732d..668e9e82648 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -67,8 +67,7 @@ static HdfsFileStatus setXAttr(
     FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
     INodesInPath iip;
@@ -95,8 +94,7 @@ static List<XAttr> getXAttrs(FSDirectory fsd, final String srcArg,
     if (!getAll) {
       XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath);
     }
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPathAccess(pc, iip, FsAction.READ);
@@ -136,8 +134,7 @@ static List<XAttr> listXAttrs(
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
     final FSPermissionChecker pc = fsd.getPermissionChecker();
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       /* To access xattr names, you need EXECUTE in the owning directory. */
@@ -164,15 +161,13 @@ static HdfsFileStatus removeXAttr(
     FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
-        src);
 
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       checkXAttrChangeAccess(fsd, iip, xAttr, pc);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 5bf9d05e5a7..ccdcd6d663f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -124,6 +124,8 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) {
   public final static String DOT_INODES_STRING = ".inodes";
   public final static byte[] DOT_INODES = 
       DFSUtil.string2Bytes(DOT_INODES_STRING);
+  private final static byte[] DOT_DOT =
+      DFSUtil.string2Bytes("..");
   public final static HdfsFileStatus DOT_RESERVED_STATUS =
       new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
@@ -517,7 +519,6 @@ void disableQuotaChecks() {
    *
    * @param pc The permission checker used when resolving path.
    * @param path The path to resolve.
-   * @param pathComponents path components corresponding to the path
    * @return if the path indicates an inode, return path after replacing up to
    *         <inodeid> with the corresponding path of the inode, else the path
    *         in {@code src} as is. If the path refers to a path in the "raw"
   * @throws FileNotFoundException
   * @throws AccessControlException
   */
-  String resolvePath(FSPermissionChecker pc, String path, byte[][] pathComponents)
+  String resolvePath(FSPermissionChecker pc, String path)
       throws FileNotFoundException, AccessControlException {
     if (isReservedRawName(path) && isPermissionEnabled) {
       pc.checkSuperuserPrivilege();
     }
-    return resolvePath(path, pathComponents, this);
+    return resolvePath(path, this);
   }
 
   /**
@@ -1289,13 +1290,6 @@ public static byte[][] getPathComponents(INode inode) {
     return components.toArray(new byte[components.size()][]);
   }
 
-  /**
-   * @return path components for reserved path, else null.
-   */
-  static byte[][] getPathComponentsForReservedPath(String src) {
-    return !isReservedName(src) ? null : INode.getPathComponents(src);
-  }
-
   /** Check if a given inode name is reserved */
   public static boolean isReservedName(INode inode) {
     return CHECK_RESERVED_FILE_NAMES
@@ -1321,6 +1315,12 @@ static boolean isReservedInodesName(String src) {
         Path.SEPARATOR + DOT_INODES_STRING);
   }
 
+  static boolean isReservedName(byte[][] components) {
+    return (components.length > 2) &&
+        Arrays.equals(INodeDirectory.ROOT_NAME, components[0]) &&
+        Arrays.equals(DOT_RESERVED, components[1]);
+  }
+
   /**
    * Resolve a /.reserved/... path to a non-reserved path.
   * <p>
@@ -1340,7 +1340,6 @@ static boolean isReservedInodesName(String src) {
   * unencrypted file).
   *
   * @param src path that is being processed
-  * @param pathComponents path components corresponding to the path
   * @param fsd FSDirectory
   * @return if the path indicates an inode, return path after replacing up to
   *         <inodeid> with the corresponding path of the inode, else the path
@@ -1348,45 +1347,37 @@ static boolean isReservedInodesName(String src)
   *         directory, return the non-raw pathname.
   * @throws FileNotFoundException if inodeid is invalid
   */
-  static String resolvePath(String src, byte[][] pathComponents,
+  static String resolvePath(String src,
       FSDirectory fsd) throws FileNotFoundException {
-    final int nComponents = (pathComponents == null) ?
-        0 : pathComponents.length;
-    if (nComponents <= 2) {
-      return src;
-    }
-    if (!Arrays.equals(DOT_RESERVED, pathComponents[1])) {
+    byte[][] pathComponents = INode.getPathComponents(src);
+    final int nComponents = pathComponents.length;
+    if (!isReservedName(pathComponents)) {
       /* This is not a /.reserved/ path so do nothing. */
-      return src;
-    }
-
-    if (Arrays.equals(DOT_INODES, pathComponents[2])) {
+    } else if (Arrays.equals(DOT_INODES, pathComponents[2])) {
       /* It's a /.reserved/.inodes path. */
       if (nComponents > 3) {
-        return resolveDotInodesPath(src, pathComponents, fsd);
-      } else {
-        return src;
+        pathComponents = resolveDotInodesPath(pathComponents, fsd);
       }
     } else if (Arrays.equals(RAW, pathComponents[2])) {
       /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */
       if (nComponents == 3) {
-        return Path.SEPARATOR;
+        pathComponents = new byte[][]{INodeDirectory.ROOT_NAME};
       } else {
         if (nComponents == 4
            && Arrays.equals(DOT_RESERVED, pathComponents[3])) {
          /* It's /.reserved/raw/.reserved so don't strip */
-          return src;
        } else {
-          return constructRemainingPath("", pathComponents, 3);
+          pathComponents = constructRemainingPath(
+              new byte[][]{INodeDirectory.ROOT_NAME}, pathComponents, 3);
        }
      }
-    } else {
-      /* It's some sort of /.reserved/<unknown> path. Ignore it. */
-      return src;
    }
+    // this double conversion will be unnecessary when resolving returns
+    // INodesInPath (needs components byte[][])
+    return DFSUtil.byteArray2PathString(pathComponents);
   }
 
-  private static String resolveDotInodesPath(String src,
+  private static byte[][] resolveDotInodesPath(
       byte[][] pathComponents, FSDirectory fsd)
       throws FileNotFoundException {
     final String inodeId = DFSUtil.bytes2String(pathComponents[3]);
@@ -1394,48 +1385,47 @@ private static String resolveDotInodesPath(String src,
     try {
       id = Long.parseLong(inodeId);
     } catch (NumberFormatException e) {
-      throw new FileNotFoundException("Invalid inode path: " + src);
+      throw new FileNotFoundException("Invalid inode path: "
+          + DFSUtil.byteArray2PathString(pathComponents));
     }
     if (id == INodeId.ROOT_INODE_ID && pathComponents.length == 4) {
-      return Path.SEPARATOR;
+      return new byte[][]{INodeDirectory.ROOT_NAME};
     }
     INode inode = fsd.getInode(id);
     if (inode == null) {
       throw new FileNotFoundException(
-          "File for given inode path does not exist: " + src);
+          "File for given inode path does not exist: "
+          + DFSUtil.byteArray2PathString(pathComponents));
     }
-
+    // Handle single ".." for NFS lookup support.
     if ((pathComponents.length > 4)
-        && DFSUtil.bytes2String(pathComponents[4]).equals("..")) {
+        && Arrays.equals(pathComponents[4], DOT_DOT)) {
       INode parent = inode.getParent();
       if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) {
         // inode is root, or its parent is root.
-        return Path.SEPARATOR;
-      } else {
-        return parent.getFullPathName();
+        return new byte[][]{INodeDirectory.ROOT_NAME};
       }
+      return parent.getPathComponents();
     }
-
-    String path = "";
-    if (id != INodeId.ROOT_INODE_ID) {
-      path = inode.getFullPathName();
-    }
-    return constructRemainingPath(path, pathComponents, 4);
+    return constructRemainingPath(
+        inode.getPathComponents(), pathComponents, 4);
   }
 
-  private static String constructRemainingPath(String pathPrefix,
-      byte[][] pathComponents, int startAt) {
-
-    StringBuilder path = new StringBuilder(pathPrefix);
-    for (int i = startAt; i < pathComponents.length; i++) {
-      path.append(Path.SEPARATOR).append(
-          DFSUtil.bytes2String(pathComponents[i]));
+  private static byte[][] constructRemainingPath(byte[][] components,
+      byte[][] extraComponents, int startAt) {
+    int remainder = extraComponents.length - startAt;
+    if (remainder > 0) {
+      // grow the array and copy in the remaining components
+      int pos = components.length;
+      components = Arrays.copyOf(components, pos + remainder);
+      System.arraycopy(extraComponents, startAt, components, pos, remainder);
     }
     if (NameNode.LOG.isDebugEnabled()) {
-      NameNode.LOG.debug("Resolved path is " + path);
+      NameNode.LOG.debug(
+          "Resolved path is " + DFSUtil.byteArray2PathString(components));
     }
-    return path.toString();
+    return components;
   }
 
   INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fce2a549bf3..9b4be659630 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1777,8 +1777,6 @@ LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
     logAuditEvent(true, "open", srcArg);
 
     if (!isInSafeMode() && res.updateAccessTime()) {
-      byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
-          srcArg);
       String src = srcArg;
       writeLock();
       final long now = now();
@@ -1802,7 +1800,7 @@ LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
          * HDFS-7463. A better fix is to change the edit log of SetTime to
          * use inode id instead of a path.
          */
-        src = dir.resolvePath(pc, srcArg, pathComponents);
+        src = dir.resolvePath(pc, srcArg);
         final INodesInPath iip = dir.getINodesInPath(src, true);
         INode inode = iip.getLastINode();
         boolean updateAccessTime = inode != null &&
@@ -2277,12 +2275,11 @@ boolean recoverLease(String src, String holder, String clientMachine)
     boolean skipSync = false;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot recover the lease of " + src);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);
       final INodesInPath iip = dir.getINodesInPath4Write(src);
       final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
       if (!inode.isUnderConstruction()) {
@@ -2526,14 +2523,13 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
     final byte storagePolicyID;
     final List<DatanodeStorageInfo> chosen;
     checkOperation(OperationCategory.READ);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
       //check safe mode
       checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);
 
       //check lease
       final INode inode;
@@ -3040,14 +3036,13 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength)
       throws IOException {
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
 
     FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot fsync file " + src);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);
       final INode inode;
       if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
         // Older clients may not have given us an inode ID to work with.
@@ -6769,11 +6764,10 @@ void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
 
   void checkAccess(String src, FsAction mode) throws IOException {
     checkOperation(OperationCategory.READ);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = FSDirectory.resolvePath(src, dir);
       final INodesInPath iip = dir.getINodesInPath(src, true);
       INode inode = iip.getLastINode();
       if (inode == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index aa5e5c5f054..cbf834b377c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -856,9 +856,9 @@ public void testGetPathFromInode() throws QuotaExceededException {
     byte[][] actual = FSDirectory.getPathComponents(inode);
     DFSTestUtil.checkComponentsEquals(expected, actual);
   }
-
+
   /**
-   * Tests for {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
+   * Tests for {@link FSDirectory#resolvePath(String, FSDirectory)}
    */
   @Test
   public void testInodePath() throws IOException {
@@ -868,54 +868,47 @@ public void testInodePath() throws IOException {
     // For an any inode look up return inode corresponding to "c" from /a/b/c
     FSDirectory fsd = Mockito.mock(FSDirectory.class);
     Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());
-
-    // Null components
-    assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));
-
+    // Tests for FSDirectory#resolvePath()
     // Non inode regular path
-    byte[][] components = INode.getPathComponents(path);
-    String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    String resolvedPath = FSDirectory.resolvePath(path, fsd);
     assertEquals(path, resolvedPath);
-
+
     // Inode path with no trailing separator
-    components = INode.getPathComponents("/.reserved/.inodes/1");
-    resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    String testPath = "/.reserved/.inodes/1";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(path, resolvedPath);
-
+
     // Inode path with trailing separator
-    components = INode.getPathComponents("/.reserved/.inodes/1/");
+    testPath = "/.reserved/.inodes/1/";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(path, resolvedPath);
-
+
     // Inode relative path
-    components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
-    resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    testPath = "/.reserved/.inodes/1/d/e/f";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals("/a/b/c/d/e/f", resolvedPath);
-
+
     // A path with just .inodes returns the path as is
-    String testPath = "/.reserved/.inodes";
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    testPath = "/.reserved/.inodes";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(testPath, resolvedPath);
-
+
     // Root inode path
     testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals("/", resolvedPath);
-
+
     // An invalid inode path should remain unresolved
     testPath = "/.invalid/.inodes/1";
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
    assertEquals(testPath, resolvedPath);
-
+
    // Test path with nonexistent(deleted or wrong id) inode
    Mockito.doReturn(null).when(fsd).getInode(Mockito.anyLong());
    testPath = "/.reserved/.inodes/1234";
-    components = INode.getPathComponents(testPath);
    try {
-      String realPath = FSDirectory.resolvePath(testPath, components, fsd);
+      String realPath = FSDirectory.resolvePath(testPath, fsd);
      fail("Path should not be resolved:" + realPath);
    } catch (IOException e) {
      assertTrue(e instanceof FileNotFoundException);
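
The mechanical rewrite this patch applies at every NameNode call site is the same two-line collapse, visible throughout the hunks above:

    // before: each RPC handler pre-computed the components itself
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    src = fsd.resolvePath(pc, src, pathComponents);

    // after: resolution is fully encapsulated
    src = fsd.resolvePath(pc, src);

The optimization itself lives in FSDirectory.resolvePath(String, FSDirectory): the path is split into byte[][] components exactly once, the /.reserved checks become byte comparisons (Arrays.equals against DOT_RESERVED, DOT_INODES, RAW, DOT_DOT), component splicing uses Arrays.copyOf plus System.arraycopy instead of StringBuilder concatenation, and a String is rebuilt only on return. The standalone sketch below shows the same technique in isolation. It is a simplified illustration, not NameNode code: the class and helper names (ReservedPathSketch, split, join, resolve) are invented for this example, only the /.reserved/raw case is handled, and the real implementation goes through INode.getPathComponents() and DFSUtil.byteArray2PathString().

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class ReservedPathSketch {

      // analogous to INodeDirectory.ROOT_NAME, which is the empty name
      private static final byte[] ROOT = bytes("");
      private static final byte[] DOT_RESERVED = bytes(".reserved");
      private static final byte[] RAW = bytes("raw");

      private static byte[] bytes(String s) {
        return s.getBytes(StandardCharsets.UTF_8);
      }

      /** "/a/b/c" -> {"", "a", "b", "c"}, each component as UTF-8 bytes. */
      static byte[][] split(String path) {
        String[] parts = path.split("/", -1);
        byte[][] components = new byte[parts.length][];
        for (int i = 0; i < parts.length; i++) {
          components[i] = bytes(parts[i]);
        }
        return components;
      }

      /** Inverse of split(): {"", "a", "b"} -> "/a/b". */
      static String join(byte[][] components) {
        StringBuilder sb = new StringBuilder();
        for (int i = 1; i < components.length; i++) {
          sb.append('/').append(new String(components[i], StandardCharsets.UTF_8));
        }
        return sb.length() == 0 ? "/" : sb.toString();
      }

      /** Mirrors the new FSDirectory.isReservedName(byte[][]) check. */
      static boolean isReserved(byte[][] components) {
        return components.length > 2
            && Arrays.equals(ROOT, components[0])
            && Arrays.equals(DOT_RESERVED, components[1]);
      }

      /** Strip a /.reserved/raw prefix by splicing components, not Strings. */
      static String resolve(String path) {
        byte[][] components = split(path);
        if (isReserved(components) && Arrays.equals(RAW, components[2])) {
          int remainder = components.length - 3;
          byte[][] stripped = new byte[][] { ROOT };
          if (remainder > 0) {
            // grow the array and copy in the remaining components,
            // the same move constructRemainingPath() makes above
            int pos = stripped.length;
            stripped = Arrays.copyOf(stripped, pos + remainder);
            System.arraycopy(components, 3, stripped, pos, remainder);
          }
          components = stripped;
        }
        return join(components);
      }

      public static void main(String[] args) {
        System.out.println(resolve("/.reserved/raw/a/b")); // /a/b
        System.out.println(resolve("/.reserved/raw"));     // /
        System.out.println(resolve("/a/b"));               // /a/b
      }
    }

Keeping the intermediate result as components rather than a joined String also sets up the follow-on cleanup flagged by the new comment in resolvePath(): the remaining double conversion disappears once resolution returns INodesInPath, which wants byte[][] components anyway.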