diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index 3a5d7dcd32a..5192352a504 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -85,9 +85,10 @@ final class FSDirAppendOp {
     final LocatedBlock lb;
     final FSDirectory fsd = fsn.getFSDirectory();
     final String src;
+    final INodesInPath iip;
     fsd.writeLock();
     try {
-      final INodesInPath iip = fsd.resolvePathForWrite(pc, srcArg);
+      iip = fsd.resolvePathForWrite(pc, srcArg);
       src = iip.getPath();
       // Verify that the destination does not exist as a directory already
       final INode inode = iip.getLastINode();
@@ -148,8 +149,7 @@ final class FSDirAppendOp {
       fsd.writeUnlock();
     }

-    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, src, false,
-        FSDirectory.isReservedRawName(srcArg));
+    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
     if (lb != null) {
       NameNode.stateChangeLog.debug(
           "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index c9eedf517a3..8a9393e3ed4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -108,16 +108,16 @@ class FSDirStatAndListingOp {
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException("Invalid file name: " + src);
     }
+    final INodesInPath iip;
     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
-      final INodesInPath iip = fsd.resolvePath(pc, srcArg, resolveLink);
-      src = iip.getPath();
+      iip = fsd.resolvePath(pc, srcArg, resolveLink);
       fsd.checkPermission(pc, iip, false, null, null, null, null, false);
     } else {
       src = FSDirectory.resolvePath(srcArg, fsd);
+      iip = fsd.getINodesInPath(src, resolveLink);
     }
-    return getFileInfo(fsd, src, FSDirectory.isReservedRawName(srcArg),
-        resolveLink);
+    return getFileInfo(fsd, iip);
   }

   /**
@@ -230,7 +230,6 @@ class FSDirStatAndListingOp {
       String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
       throws IOException {
     String srcs = FSDirectory.normalizePath(src);
-    final boolean isRawPath = FSDirectory.isReservedRawName(src);
     if (FSDirectory.isExactReservedName(srcs)) {
       return getReservedListing(fsd);
     }
@@ -257,7 +256,7 @@ class FSDirStatAndListingOp {
         return new DirectoryListing(
             new HdfsFileStatus[]{ createFileStatus(
                 fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
-                needLocation, parentStoragePolicy, snapshot, isRawPath, iip)
+                needLocation, parentStoragePolicy, iip)
             }, 0);
       }

@@ -282,7 +281,7 @@ class FSDirStatAndListingOp {
             cur.getLocalNameBytes());
         listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), nodeAttrs,
             needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy),
-            snapshot, isRawPath, iipWithChild);
+            iipWithChild);
         listingCnt++;
         if (needLocation) {
           // Once we hit lsLimit locations, stop.
@@ -339,7 +338,6 @@ class FSDirStatAndListingOp {
       listing[i] = createFileStatus(
           fsd, sRoot.getLocalNameBytes(), nodeAttrs,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-          Snapshot.CURRENT_STATE_ID, false,
           INodesInPath.fromINode(sRoot));
     }
     return new DirectoryListing(
@@ -363,10 +361,8 @@ class FSDirStatAndListingOp {
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  static HdfsFileStatus getFileInfo(
-      FSDirectory fsd, String path, INodesInPath iip, boolean isRawPath,
-      boolean includeStoragePolicy)
-      throws IOException {
+  static HdfsFileStatus getFileInfo(FSDirectory fsd,
+      INodesInPath iip, boolean includeStoragePolicy) throws IOException {
     fsd.readLock();
     try {
       final INode node = iip.getLastINode();
@@ -377,23 +373,21 @@ class FSDirStatAndListingOp {

       byte policyId = includeStoragePolicy && !node.isSymlink() ?
           node.getStoragePolicyID() :
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-      INodeAttributes nodeAttrs = getINodeAttributes(fsd, path,
+      INodeAttributes nodeAttrs = getINodeAttributes(fsd, iip.getPath(),
           HdfsFileStatus.EMPTY_NAME, node, iip.getPathSnapshotId());
       return createFileStatus(fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
-          policyId, iip.getPathSnapshotId(), isRawPath, iip);
+          policyId, iip);
     } finally {
       fsd.readUnlock();
     }
   }

-  static HdfsFileStatus getFileInfo(
-      FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath)
+  static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip)
     throws IOException {
     fsd.readLock();
     try {
       HdfsFileStatus status = null;
-      final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
       if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
         status = FSDirectory.DOT_RESERVED_STATUS;
       } else if (iip.isDotSnapshotDir()) {
@@ -401,7 +395,7 @@ class FSDirStatAndListingOp {
           status = FSDirectory.DOT_SNAPSHOT_DIR_STATUS;
         }
       } else {
-        status = getFileInfo(fsd, src, iip, isRawPath, true);
+        status = getFileInfo(fsd, iip, true);
       }
       return status;
     } finally {
@@ -423,15 +417,12 @@ class FSDirStatAndListingOp {
    */
   private static HdfsFileStatus createFileStatus(
       FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
-      boolean needLocation, byte storagePolicy, int snapshot, boolean isRawPath,
-      INodesInPath iip)
+      boolean needLocation, byte storagePolicy, INodesInPath iip)
       throws IOException {
     if (needLocation) {
-      return createLocatedFileStatus(fsd, path, nodeAttrs, storagePolicy,
-          snapshot, isRawPath, iip);
+      return createLocatedFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
     } else {
-      return createFileStatus(fsd, path, nodeAttrs, storagePolicy,
-          snapshot, isRawPath, iip);
+      return createFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
     }
   }

@@ -445,8 +436,7 @@ class FSDirStatAndListingOp {
       INodesInPath iip) throws IOException {
     INodeAttributes nodeAttrs = getINodeAttributes(
         fsd, fullPath, path, iip.getLastINode(), snapshot);
-    return createFileStatus(fsd, path, nodeAttrs, storagePolicy,
-        snapshot, isRawPath, iip);
+    return createFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
   }

   /**
@@ -454,14 +444,15 @@ class FSDirStatAndListingOp {
    * @param iip the INodesInPath containing the target INode and its ancestors
    */
   static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path,
-      INodeAttributes nodeAttrs, byte storagePolicy, int snapshot,
-      boolean isRawPath, INodesInPath iip) throws IOException {
+      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
+      byte storagePolicy, INodesInPath iip) throws IOException {
     long size = 0; // length is zero for directories
     short replication = 0;
     long blocksize = 0;
     final boolean isEncrypted;
     final INode node = iip.getLastINode();
+    final int snapshot = iip.getPathSnapshotId();
+    final boolean isRawPath = iip.isRaw();
     final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
         .getFileEncryptionInfo(fsd, node, snapshot, iip);

@@ -511,10 +502,9 @@ class FSDirStatAndListingOp {
    * Create FileStatus with location info by file INode
    * @param iip the INodesInPath containing the target INode and its ancestors
    */
-  private static HdfsLocatedFileStatus createLocatedFileStatus(
+  private static HdfsFileStatus createLocatedFileStatus(
       FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
-      byte storagePolicy, int snapshot,
-      boolean isRawPath, INodesInPath iip) throws IOException {
+      byte storagePolicy, INodesInPath iip) throws IOException {
     assert fsd.hasReadLock();
     long size = 0; // length is zero for directories
     short replication = 0;
@@ -522,6 +512,8 @@ class FSDirStatAndListingOp {
     LocatedBlocks loc = null;
     final boolean isEncrypted;
     final INode node = iip.getLastINode();
+    final int snapshot = iip.getPathSnapshotId();
+    final boolean isRawPath = iip.isRaw();
     final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
         .getFileEncryptionInfo(fsd, node, snapshot, iip);

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 84290241865..030d8cc8a96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -340,7 +340,6 @@ class FSDirWriteFileOp {
       version = ezInfo.protocolVersion;
     }

-    boolean isRawPath = FSDirectory.isReservedRawName(src);
     FSDirectory fsd = fsn.getFSDirectory();
     INodesInPath iip = fsd.resolvePathForWrite(pc, src);
     src = iip.getPath();
@@ -444,7 +443,7 @@ class FSDirWriteFileOp {
       NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
           src + " inode " + newNode.getId() + " " + holder);
     }
-    return FSDirStatAndListingOp.getFileInfo(fsd, src, false, isRawPath);
+    return FSDirStatAndListingOp.getFileInfo(fsd, iip);
   }

   static EncryptionKeyInfo getEncryptionKeyInfo(FSNamesystem fsn,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index b6652e4713c..5dc5a03945c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -531,21 +531,24 @@ public class FSDirectory implements Closeable {
    * @throws FileNotFoundException
    * @throws AccessControlException
    */
-  INodesInPath resolvePath(FSPermissionChecker pc, String src)
+  @VisibleForTesting
+  public INodesInPath resolvePath(FSPermissionChecker pc, String src)
       throws UnresolvedLinkException, FileNotFoundException,
       AccessControlException {
     return resolvePath(pc, src, true);
   }

-  INodesInPath resolvePath(FSPermissionChecker pc, String src,
+  @VisibleForTesting
+  public INodesInPath resolvePath(FSPermissionChecker pc, String src,
      boolean resolveLink) throws UnresolvedLinkException,
      FileNotFoundException, AccessControlException {
     byte[][] components = INode.getPathComponents(src);
-    if (isPermissionEnabled && pc != null && isReservedRawName(components)) {
+    boolean isRaw = isReservedRawName(components);
+    if (isPermissionEnabled && pc != null && isRaw) {
       pc.checkSuperuserPrivilege();
     }
     components = resolveComponents(components, this);
-    return INodesInPath.resolve(rootDir, components, resolveLink);
+    return INodesInPath.resolve(rootDir, components, isRaw, resolveLink);
   }

   INodesInPath resolvePathForWrite(FSPermissionChecker pc, String src)
@@ -1662,8 +1665,7 @@ public class FSDirectory implements Closeable {
   HdfsFileStatus getAuditFileInfo(INodesInPath iip)
       throws IOException {
     return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-        ? FSDirStatAndListingOp.getFileInfo(this, iip.getPath(), iip, false,
-        false) : null;
+        ? FSDirStatAndListingOp.getFileInfo(this, iip, false) : null;
   }

   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 86cab28ef00..af8998f3518 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -126,6 +126,12 @@ public class INodesInPath {
   static INodesInPath resolve(final INodeDirectory startingDir,
       final byte[][] components, final boolean resolveLink)
       throws UnresolvedLinkException {
+    return resolve(startingDir, components, false, resolveLink);
+  }
+
+  static INodesInPath resolve(final INodeDirectory startingDir,
+      final byte[][] components, final boolean isRaw,
+      final boolean resolveLink) throws UnresolvedLinkException {
     Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);

     INode curNode = startingDir;
@@ -225,7 +231,7 @@ public class INodesInPath {
       System.arraycopy(inodes, 0, newNodes, 0, newNodes.length);
       inodes = newNodes;
     }
-    return new INodesInPath(inodes, components, isSnapshot, snapshotId);
+    return new INodesInPath(inodes, components, isRaw, isSnapshot, snapshotId);
   }

   private static boolean shouldUpdateLatestId(int sid, int snapshotId) {
@@ -249,7 +255,8 @@ public class INodesInPath {
     INode[] inodes = new INode[iip.inodes.length];
     System.arraycopy(iip.inodes, 0, inodes, 0, inodes.length);
     inodes[pos] = inode;
-    return new INodesInPath(inodes, iip.path, iip.isSnapshot, iip.snapshotId);
+    return new INodesInPath(inodes, iip.path, iip.isRaw,
+        iip.isSnapshot, iip.snapshotId);
   }

   /**
@@ -267,7 +274,8 @@ public class INodesInPath {
     byte[][] path = new byte[iip.path.length + 1][];
     System.arraycopy(iip.path, 0, path, 0, path.length - 1);
     path[path.length - 1] = childName;
-    return new INodesInPath(inodes, path, iip.isSnapshot, iip.snapshotId);
+    return new INodesInPath(inodes, path, iip.isRaw,
+        iip.isSnapshot, iip.snapshotId);
   }

   private final byte[][] path;
@@ -279,6 +287,13 @@ public class INodesInPath {
    * true if this path corresponds to a snapshot
    */
   private final boolean isSnapshot;
+
+  /**
+   * true if this is a /.reserved/raw path. path component resolution strips
+   * it from the path so need to track it separately.
+   */
+  private final boolean isRaw;
+
   /**
    * For snapshot paths, it is the id of the snapshot; or
    * {@link Snapshot#CURRENT_STATE_ID} if the snapshot does not exist. For
@@ -287,17 +302,18 @@ public class INodesInPath {
    */
   private final int snapshotId;

-  private INodesInPath(INode[] inodes, byte[][] path, boolean isSnapshot,
-      int snapshotId) {
+  private INodesInPath(INode[] inodes, byte[][] path, boolean isRaw,
+      boolean isSnapshot,int snapshotId) {
     Preconditions.checkArgument(inodes != null && path != null);
     this.inodes = inodes;
     this.path = path;
+    this.isRaw = isRaw;
     this.isSnapshot = isSnapshot;
     this.snapshotId = snapshotId;
   }

   private INodesInPath(INode[] inodes, byte[][] path) {
-    this(inodes, path, false, CURRENT_STATE_ID);
+    this(inodes, path, false, false, CURRENT_STATE_ID);
   }

   /**
@@ -400,7 +416,7 @@ public class INodesInPath {
     final byte[][] apath = new byte[length][];
     System.arraycopy(this.inodes, 0, anodes, 0, length);
     System.arraycopy(this.path, 0, apath, 0, length);
-    return new INodesInPath(anodes, apath, false, snapshotId);
+    return new INodesInPath(anodes, apath, isRaw, false, snapshotId);
   }

   /**
@@ -428,7 +444,7 @@ public class INodesInPath {
     byte[][] existingPath = new byte[i][];
     System.arraycopy(inodes, 0, existing, 0, i);
     System.arraycopy(path, 0, existingPath, 0, i);
-    return new INodesInPath(existing, existingPath, false, snapshotId);
+    return new INodesInPath(existing, existingPath, isRaw, false, snapshotId);
   }

   /**
@@ -438,10 +454,20 @@ public class INodesInPath {
     return this.isSnapshot;
   }

+  /**
+   * @return if .snapshot is the last path component.
+   */
   boolean isDotSnapshotDir() {
     return isDotSnapshotDir(getLastLocalName());
   }

+  /**
+   * @return if this is a /.reserved/raw path.
+   */
+  public boolean isRaw() {
+    return isRaw;
+  }
+
   private static String toString(INode inode) {
     return inode == null? null: inode.getLocalName();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
index 449f71579f3..c09d34644f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -49,6 +50,8 @@ import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesNotEqual;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.GenericTestUtils.assertMatches;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 public class TestReservedRawPaths {
@@ -99,6 +102,24 @@ public class TestReservedRawPaths {
     }
   }

+  /**
+   * Verify resolving path will return an iip that tracks if the original
+   * path was a raw path.
+   */
+  @Test(timeout = 120000)
+  public void testINodesInPath() throws IOException {
+    FSDirectory fsd = cluster.getNamesystem().getFSDirectory();
+    final String path = "/path";
+
+    INodesInPath iip = fsd.resolvePath(null, path);
+    assertFalse(iip.isRaw());
+    assertEquals(path, iip.getPath());
+
+    iip = fsd.resolvePath(null, "/.reserved/raw" + path);
+    assertTrue(iip.isRaw());
+    assertEquals(path, iip.getPath());
+  }
+
   /**
    * Basic read/write tests of raw files.
    * Create a non-encrypted file
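
Not part of the patch: the short sketch below is a self-contained illustration of the pattern the change introduces, resolving the "/.reserved/raw" prefix once and carrying the result as a flag on the resolved object, rather than re-deriving it from the original path string at each getFileInfo/createFileStatus call site. The ResolvedPath class and its method names are hypothetical stand-ins; in the patch the real logic lives in FSDirectory#resolvePath and the new isRaw field of INodesInPath.

import java.util.Objects;

/**
 * Hypothetical, standalone illustration of the patch's pattern: strip the
 * "/.reserved/raw" prefix during resolution and remember that it was there,
 * instead of re-checking the original path string later.
 */
final class ResolvedPath {
  private static final String RAW_PREFIX = "/.reserved/raw";

  private final String path;   // path with the raw prefix stripped
  private final boolean isRaw; // true if the original path was a raw path

  private ResolvedPath(String path, boolean isRaw) {
    this.path = Objects.requireNonNull(path);
    this.isRaw = isRaw;
  }

  /** Strip the raw prefix (if present) and record whether it was present. */
  static ResolvedPath resolve(String src) {
    if (src.equals(RAW_PREFIX) || src.startsWith(RAW_PREFIX + "/")) {
      String stripped = src.substring(RAW_PREFIX.length());
      return new ResolvedPath(stripped.isEmpty() ? "/" : stripped, true);
    }
    return new ResolvedPath(src, false);
  }

  String getPath() { return path; }

  boolean isRaw() { return isRaw; }

  public static void main(String[] args) {
    // Mirrors the assertions in testINodesInPath(): the same logical path,
    // with and without the raw prefix, differs only in the raw flag.
    ResolvedPath plain = ResolvedPath.resolve("/path");
    ResolvedPath raw = ResolvedPath.resolve("/.reserved/raw/path");
    System.out.println(plain.getPath() + " raw=" + plain.isRaw()); // /path raw=false
    System.out.println(raw.getPath() + " raw=" + raw.isRaw());     // /path raw=true
  }
}

Keeping the flag on the resolved object means a path is parsed once per operation and the raw marker cannot drift out of sync with the stripped path the rest of the code sees.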