From 1b3b09d94794622e8336220d897a1f10c4654677 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Mon, 29 Oct 2012 14:11:04 +0000
Subject: [PATCH] HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable returning more than INode array. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1403304 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 ++
 .../hdfs/server/namenode/FSDirectory.java      | 35 ++++++------
 .../hdfs/server/namenode/INodeDirectory.java   | 53 ++++++++++++-------
 3 files changed, 57 insertions(+), 34 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 30a2f69f6fc..ab48d59e0f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -155,6 +155,9 @@ Trunk (Unreleased)
     HDFS-4122. Cleanup HDFS logs and reduce the size of logged messages.
     (suresh)
 
+    HDFS-4124. Refactor INodeDirectory#getExistingPathINodes() to enable
+    returning more than INode array. (Jing Zhao via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 19955075ab6..6b3cb04a187 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -44,10 +44,10 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.util.ByteArray;
 
 import com.google.common.base.Preconditions;
@@ -548,8 +549,9 @@ public class FSDirectory implements Closeable {
     }
 
     byte[][] dstComponents = INode.getPathComponents(dst);
-    INode[] dstInodes = new INode[dstComponents.length];
-    rootDir.getExistingPathINodes(dstComponents, dstInodes, false);
+    INodesInPath dstInodesInPath = rootDir.getExistingPathINodes(dstComponents,
+        dstComponents.length, false);
+    INode[] dstInodes = dstInodesInPath.getINodes();
     if (dstInodes[dstInodes.length-1] != null) {
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
                                    +"failed to rename "+src+" to "+dst+
@@ -564,7 +566,7 @@
     }
 
     // Ensure dst has quota to accommodate rename
-    verifyQuotaForRename(srcInodes,dstInodes);
+    verifyQuotaForRename(srcInodes, dstInodes);
 
     INode dstChild = null;
     INode srcChild = null;
@@ -668,8 +670,9 @@
       throw new IOException(error);
     }
     final byte[][] dstComponents = INode.getPathComponents(dst);
-    final INode[] dstInodes = new INode[dstComponents.length];
-    rootDir.getExistingPathINodes(dstComponents, dstInodes, false);
+    INodesInPath dstInodesInPath = rootDir.getExistingPathINodes(dstComponents,
+        dstComponents.length, false);
+    final INode[] dstInodes = dstInodesInPath.getINodes();
     INode dstInode = dstInodes[dstInodes.length - 1];
     if (dstInodes.length == 1) {
       error = "rename destination cannot be the root";
@@ -1443,12 +1446,13 @@
     src = normalizePath(src);
     String[] names = INode.getPathNames(src);
     byte[][] components = INode.getPathComponents(names);
-    INode[] inodes = new INode[components.length];
-    final int lastInodeIndex = inodes.length - 1;
+    final int lastInodeIndex = components.length - 1;
 
     writeLock();
     try {
-      rootDir.getExistingPathINodes(components, inodes, false);
+      INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+          components.length, false);
+      INode[] inodes = inodesInPath.getINodes();
 
       // find the index of the first null in inodes[]
       StringBuilder pathbuilder = new StringBuilder();
@@ -1518,16 +1522,14 @@
     return true;
   }
 
-  /**
-   */
   INode unprotectedMkdir(String src, PermissionStatus permissions,
                           long timestamp) throws QuotaExceededException,
                           UnresolvedLinkException {
     assert hasWriteLock();
     byte[][] components = INode.getPathComponents(src);
-    INode[] inodes = new INode[components.length];
-
-    rootDir.getExistingPathINodes(components, inodes, false);
+    INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+        components.length, false);
+    INode[] inodes = inodesInPath.getINodes();
     unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1],
         permissions, timestamp);
     return inodes[inodes.length-1];
@@ -1556,10 +1558,11 @@
     byte[] path = components[components.length-1];
     child.setLocalName(path);
     cacheName(child);
-    INode[] inodes = new INode[components.length];
     writeLock();
     try {
-      rootDir.getExistingPathINodes(components, inodes, false);
+      INodesInPath inodesInPath = rootDir.getExistingPathINodes(components,
+          components.length, false);
+      INode[] inodes = inodesInPath.getINodes();
       return addChild(inodes, inodes.length-1, child, childDiskspace);
     } finally {
       writeUnlock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index b10193053a0..256ab090ec8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -130,9 +130,9 @@ class INodeDirectory extends INode {
    */
   private INode getNode(byte[][] components, boolean resolveLink
       ) throws UnresolvedLinkException {
-    INode[] inode = new INode[1];
-    getExistingPathINodes(components, inode, resolveLink);
-    return inode[0];
+    INodesInPath inodesInPath = getExistingPathINodes(components, 1,
+        resolveLink);
+    return inodesInPath.inodes[0];
   }
 
   /**
@@ -180,27 +180,29 @@
    * fill the array with [rootINode,c1,c2,null]
    * 
    * @param components array of path component name
-   * @param existing array to fill with existing INodes
+   * @param numOfINodes number of INodes to return
    * @param resolveLink indicates whether UnresolvedLinkException should
    *        be thrown when the path refers to a symbolic link.
-   * @return number of existing INodes in the path
+   * @return the specified number of existing INodes in the path
    */
-  int getExistingPathINodes(byte[][] components, INode[] existing,
-      boolean resolveLink) throws UnresolvedLinkException {
+  INodesInPath getExistingPathINodes(byte[][] components, int numOfINodes,
+      boolean resolveLink)
+      throws UnresolvedLinkException {
     assert this.compareTo(components[0]) == 0 :
         "Incorrect name " + getLocalName() + " expected "
         + (components[0] == null? null: DFSUtil.bytes2String(components[0]));
 
+    INodesInPath existing = new INodesInPath(numOfINodes);
     INode curNode = this;
     int count = 0;
-    int index = existing.length - components.length;
+    int index = numOfINodes - components.length;
     if (index > 0) {
       index = 0;
     }
     while (count < components.length && curNode != null) {
       final boolean lastComp = (count == components.length - 1);
       if (index >= 0) {
-        existing[index] = curNode;
+        existing.inodes[index] = curNode;
       }
       if (curNode.isLink() && (!lastComp || (lastComp && resolveLink))) {
         final String path = constructPath(components, 0, components.length);
@@ -225,7 +227,7 @@
       count++;
       index++;
     }
-    return count;
+    return existing;
   }
 
   /**
@@ -246,11 +248,9 @@
   INode[] getExistingPathINodes(String path, boolean resolveLink)
     throws UnresolvedLinkException {
     byte[][] components = getPathComponents(path);
-    INode[] inodes = new INode[components.length];
-
-    this.getExistingPathINodes(components, inodes, resolveLink);
-
-    return inodes;
+    INodesInPath inodes = this.getExistingPathINodes(components,
+        components.length, resolveLink);
+    return inodes.inodes;
   }
 
   /**
@@ -341,9 +341,8 @@
     if (pathComponents.length < 2) // add root
       return null;
     // Gets the parent INode
-    INode[] inodes = new INode[2];
-    getExistingPathINodes(pathComponents, inodes, false);
-    INode inode = inodes[0];
+    INodesInPath inodes = getExistingPathINodes(pathComponents, 2, false);
+    INode inode = inodes.inodes[0];
     if (inode == null) {
       throw new FileNotFoundException("Parent path does not exist: "+
           DFSUtil.byteArray2String(pathComponents));
@@ -443,4 +442,22 @@
     children = null;
     return total;
   }
+
+  /**
+   * Used by
+   * {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}.
+   * Containing INodes information resolved from a given path.
+   */
+  static class INodesInPath {
+    private INode[] inodes;
+
+    public INodesInPath(int number) {
+      assert (number >= 0);
+      this.inodes = new INode[number];
+    }
+
+    INode[] getINodes() {
+      return inodes;
+    }
+  }
 }
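
Reviewer note (not part of the patch): the net effect on call sites is that getExistingPathINodes() now returns an INodesInPath wrapper instead of filling a caller-allocated INode[]. The sketch below is illustrative only; resolveExisting is a hypothetical helper name, and it assumes the code sits in the org.apache.hadoop.hdfs.server.namenode package so the package-private methods shown in this diff are visible.

  // Hypothetical helper, for illustration only (not part of this patch).
  static INode[] resolveExisting(INodeDirectory rootDir, String path)
      throws UnresolvedLinkException {
    byte[][] components = INode.getPathComponents(path);
    // Old contract: the caller allocated the array and the method filled it in:
    //   INode[] inodes = new INode[components.length];
    //   rootDir.getExistingPathINodes(components, inodes, false);
    // New contract: ask for numOfINodes and read the result back from the
    // returned INodesInPath wrapper; returning an object leaves room to carry
    // more than the bare INode array later, the stated motivation of HDFS-4124.
    INodesInPath inodesInPath =
        rootDir.getExistingPathINodes(components, components.length, false);
    return inodesInPath.getINodes();
  }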