From a5d12d9c1f2c0e6fcd918ee8e614dcaf203e77de Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Wed, 3 Aug 2016 13:15:28 -0500
Subject: [PATCH] HDFS-10674. Optimize creating a full path from an inode.
 Contributed by Daryn Sharp.

(cherry picked from commit 22ef5286bc8511ddee9594b7cecc598bf41a850b)
---
 .../hdfs/server/namenode/FSDirectory.java     | 55 +------------------
 .../hdfs/server/namenode/FSNamesystem.java    |  2 +-
 .../hadoop/hdfs/server/namenode/INode.java    | 20 ++++-
 .../hdfs/server/namenode/INodesInPath.java    |  4 +-
 .../namenode/TestSnapshotPathINodes.java      |  7 ++-
 5 files changed, 29 insertions(+), 59 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index f1400208175..fc41143aea7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -885,57 +885,6 @@ public class FSDirectory implements Closeable {
     return typeSpaceDeltas;
   }
 
-  /** Return the name of the path represented by inodes at [0, pos] */
-  static String getFullPathName(INode[] inodes, int pos) {
-    StringBuilder fullPathName = new StringBuilder();
-    if (inodes[0].isRoot()) {
-      if (pos == 0) return Path.SEPARATOR;
-    } else {
-      fullPathName.append(inodes[0].getLocalName());
-    }
-
-    for (int i=1; i<=pos; i++) {
-      fullPathName.append(Path.SEPARATOR_CHAR).append(inodes[i].getLocalName());
-    }
-    return fullPathName.toString();
-  }
-
-  /**
-   * @return the relative path of an inode from one of its ancestors,
-   *         represented by an array of inodes.
-   */
-  private static INode[] getRelativePathINodes(INode inode, INode ancestor) {
-    // calculate the depth of this inode from the ancestor
-    int depth = 0;
-    for (INode i = inode; i != null && !i.equals(ancestor); i = i.getParent()) {
-      depth++;
-    }
-    INode[] inodes = new INode[depth];
-
-    // fill up the inodes in the path from this inode to root
-    for (int i = 0; i < depth; i++) {
-      if (inode == null) {
-        NameNode.stateChangeLog.warn("Could not get full path."
-            + " Corresponding file might have deleted already.");
-        return null;
-      }
-      inodes[depth-i-1] = inode;
-      inode = inode.getParent();
-    }
-    return inodes;
-  }
-
-  private static INode[] getFullPathINodes(INode inode) {
-    return getRelativePathINodes(inode, null);
-  }
-
-  /** Return the full path name of the specified inode */
-  static String getFullPathName(INode inode) {
-    INode[] inodes = getFullPathINodes(inode);
-    // inodes can be null only when its called without holding lock
-    return inodes == null ? "" : getFullPathName(inodes, inodes.length - 1);
-  }
-
   /**
    * Add the given child to the namespace.
    * @param existing the INodesInPath containing all the ancestral INodes
@@ -987,9 +936,7 @@ public class FSDirectory implements Closeable {
       try {
         q.verifyQuota(deltas);
       } catch (QuotaExceededException e) {
-        List<INode> inodes = iip.getReadOnlyINodes();
-        final String path = getFullPathName(inodes.toArray(new INode[inodes.size()]), i);
-        e.setPathName(path);
+        e.setPathName(iip.getPath(i));
         throw e;
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index bdf15bce95a..fce2a549bf3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5033,7 +5033,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         final INodeFile inode = getBlockCollection(blk);
         skip++;
         if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
-          String src = FSDirectory.getFullPathName(inode);
+          String src = inode.getFullPathName();
           if (src.startsWith(path)){
             corruptFiles.add(new CorruptFileBlockInfo(src, blk));
             count++;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index eb910d4b1f2..38883b6ed0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -569,7 +569,25 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
 
   public String getFullPathName() {
     // Get the full path name of this inode.
-    return FSDirectory.getFullPathName(this);
+    if (isRoot()) {
+      return Path.SEPARATOR;
+    }
+    // compute size of needed bytes for the path
+    int idx = 0;
+    for (INode inode = this; inode != null; inode = inode.getParent()) {
+      // add component + delimiter (if not tail component)
+      idx += inode.getLocalNameBytes().length + (inode != this ? 1 : 0);
+    }
+    byte[] path = new byte[idx];
+    for (INode inode = this; inode != null; inode = inode.getParent()) {
+      if (inode != this) {
+        path[--idx] = Path.SEPARATOR_CHAR;
+      }
+      byte[] name = inode.getLocalNameBytes();
+      idx -= name.length;
+      System.arraycopy(name, 0, path, idx, name.length);
+    }
+    return DFSUtil.bytes2String(path);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 1d540b7fcea..a7b772c6aed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -347,11 +347,11 @@ public class INodesInPath {
   }
 
   public String getParentPath() {
-    return getPath(path.length - 1);
+    return getPath(path.length - 2);
   }
 
   public String getPath(int pos) {
-    return DFSUtil.byteArray2PathString(path, 0, pos);
+    return DFSUtil.byteArray2PathString(path, 0, pos + 1); // it's a length...
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index 45c65ef8ce9..d022903c380 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -155,7 +155,12 @@ public class TestSnapshotPathINodes {
         sub1.toString());
     assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
         dir.toString());
-
+
+    assertEquals(Path.SEPARATOR, nodesInPath.getPath(0));
+    assertEquals(dir.toString(), nodesInPath.getPath(1));
+    assertEquals(sub1.toString(), nodesInPath.getPath(2));
+    assertEquals(file1.toString(), nodesInPath.getPath(3));
+
    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
     assertEquals(nodesInPath.length(), components.length);
     assertSnapshot(nodesInPath, false, null, -1);
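The sketch below is not part of the patch; it restates, in isolation, the technique the new INode#getFullPathName uses: one pass over the parent chain to size a byte array exactly, then a second pass that writes each path component tail-first, so the whole path is built with a single allocation and no StringBuilder or intermediate INode[]. The names FullPathSketch, Node, and buildFullPath are hypothetical stand-ins for INode, getLocalNameBytes(), getParent(), and DFSUtil.bytes2String; only the two-pass structure mirrors the patched code.

import java.nio.charset.StandardCharsets;

/** Illustrative sketch only: a minimal stand-in for the patched path building. */
public class FullPathSketch {
  static final char SEPARATOR_CHAR = '/'; // stands in for Path.SEPARATOR_CHAR
  static final String SEPARATOR = "/";    // stands in for Path.SEPARATOR

  /** Hypothetical stand-in for INode: a local name plus a parent pointer. */
  static class Node {
    final byte[] name;  // stands in for getLocalNameBytes()
    final Node parent;  // stands in for getParent()
    Node(String name, Node parent) {
      this.name = name.getBytes(StandardCharsets.UTF_8);
      this.parent = parent;
    }
    boolean isRoot() {
      return parent == null;
    }
  }

  /** Two-pass construction: size the array, then fill it from the tail. */
  static String buildFullPath(Node node) {
    if (node.isRoot()) {
      return SEPARATOR;
    }
    // Pass 1: exact byte count = every component plus one separator per ancestor hop.
    int idx = 0;
    for (Node n = node; n != null; n = n.parent) {
      idx += n.name.length + (n != node ? 1 : 0);
    }
    byte[] path = new byte[idx];
    // Pass 2: walk child -> root, writing each component right-to-left.
    for (Node n = node; n != null; n = n.parent) {
      if (n != node) {
        path[--idx] = (byte) SEPARATOR_CHAR;
      }
      idx -= n.name.length;
      System.arraycopy(n.name, 0, path, idx, n.name.length);
    }
    return new String(path, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    Node root = new Node("", null);       // the root's local name is empty
    Node a = new Node("a", root);
    Node b = new Node("b", a);
    Node c = new Node("c", b);
    System.out.println(buildFullPath(c)); // prints /a/b/c
  }
}

The empty root name is what makes the leading separator fall out of the same loop: the final iteration writes only the '/' that precedes the first real component and copies zero bytes, much as the old FSDirectory.getFullPathName had to special-case whether inodes[0] was the root.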