From f1d0db87cdf2436ebf51fbdaa51dd9af93e53b9b Mon Sep 17 00:00:00 2001
From: Chris Nauroth
Date: Sat, 26 Jul 2014 04:21:16 +0000
Subject: [PATCH] HDFS-6749. Merging change r1613561 from trunk to branch-2.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1613562 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 +++
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  8 ++++++++
 .../hadoop/hdfs/server/namenode/TestINodeFile.java | 14 ++++++++++++++
 3 files changed, 25 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0be0de680a1..7b98fb07095 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -128,6 +128,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6752. Avoid Address bind errors in TestDatanodeConfig#testMemlockLimit
     (vinayakumarb)
 
+    HDFS-6749. FSNamesystem methods should call resolvePath.
+    (Charles Lamb via cnauroth)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fe3e7a94e4e..142dbfef769 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3720,8 +3720,10 @@ boolean isFileClosed(String src)
       StandbyException, IOException {
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkTraverse(pc, src);
@@ -8183,9 +8185,11 @@ AclStatus getAclStatus(String src) throws IOException {
     nnConf.checkAclsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, null);
       }
@@ -8288,8 +8292,10 @@ List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
       }
     }
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.READ);
@@ -8333,8 +8339,10 @@ List<XAttr> listXAttrs(String src) throws IOException {
     nnConf.checkXAttrsConfigFlag();
     final FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         /* To access xattr names, you need EXECUTE in the owning directory. */

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 00b316223a0..a694a9de3ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -524,6 +524,7 @@ public void testInodeIdBasedPaths() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
@@ -571,6 +572,19 @@ public void testInodeIdBasedPaths() throws Exception {
       // ClientProtocol#getPreferredBlockSize
       assertEquals(testFileBlockSize,
           nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
+
+      /*
+       * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
+       * following four methods. The calls below ensure that
+       * /.reserved/.inodes paths work properly. No need to check return
+       * values as these methods are tested elsewhere.
+       */
+      {
+        fs.isFileClosed(testFileInodePath);
+        fs.getAclStatus(testFileInodePath);
+        fs.getXAttrs(testFileInodePath);
+        fs.listXAttrs(testFileInodePath);
+      }
 
       // symbolic link related tests
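
Note for reviewers: the sketch below illustrates the pattern each of the four patched
methods now follows: compute the path components before taking the namesystem lock,
then resolve any /.reserved/.inodes path under the read lock so later permission
checks see the file's real path. This is a simplified stand-in, not the actual
FSNamesystem code; the class and method names (ReservedPathDemo, getPathComponents,
resolvePath, checkPermission) are hypothetical, and the resolution logic is mocked.

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReservedPathDemo {
  private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock();

  // Stand-in for FSDirectory.getPathComponentsForReservedPath: cheap string
  // work that can safely happen before the lock is acquired.
  static String[] getPathComponents(String src) {
    return src.split("/");
  }

  // Stand-in for FSDirectory.resolvePath: rewrites /.reserved/.inodes/<id>
  // into the file's current absolute path. Here the lookup is simulated.
  String resolvePath(String src, String[] components) {
    if (src.startsWith("/.reserved/.inodes/")) {
      return "/user/demo/resolved-" + components[components.length - 1];
    }
    return src;
  }

  void checkPermission(String resolvedSrc) {
    // Permission checks must operate on the resolved path; checking the raw
    // /.reserved/.inodes path was the gap HDFS-6749 closes.
    System.out.println("checking permissions on " + resolvedSrc);
  }

  boolean isFileClosed(String src) {
    // 1. Pre-compute components outside the lock.
    String[] components = getPathComponents(src);
    fsLock.readLock().lock();
    try {
      // 2. Resolve under the read lock so the inode-to-path mapping is stable.
      src = resolvePath(src, components);
      // 3. Every later check uses the resolved path.
      checkPermission(src);
      return true; // the real method would consult the inode's state here
    } finally {
      fsLock.readLock().unlock();
    }
  }

  public static void main(String[] args) {
    new ReservedPathDemo().isFileClosed("/.reserved/.inodes/16389");
  }
}

The same three-step shape (components outside the lock, resolvePath inside, checks on
the resolved path) is what the hunks above add to isFileClosed, getAclStatus,
getXAttrs, and listXAttrs.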