diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d0f39565056..3571a4fe8a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -421,6 +421,10 @@ Release 2.0.3-alpha - Unreleased HDFS-4107. Add utility methods for casting INode to INodeFile and INodeFileUnderConstruction. (szetszwo) + HDFS-4112. A few improvements on INodeDirectory include adding a utility + method for casting; avoiding creation of new empty lists; cleaning up + some code and rewriting some javadoc. (szetszwo) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index d7935e0d58c..fffd59dd220 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -742,7 +742,7 @@ public class FSDirectory implements Closeable { throw new FileAlreadyExistsException(error); } List children = dstInode.isDirectory() ? - ((INodeDirectory) dstInode).getChildrenRaw() : null; + ((INodeDirectory) dstInode).getChildren() : null; if (children != null && children.size() != 0) { error = "rename cannot overwrite non empty destination directory " + dst; @@ -1065,35 +1065,21 @@ public class FSDirectory implements Closeable { return true; } - /** Return if a directory is empty or not **/ - boolean isDirEmpty(String src) throws UnresolvedLinkException { - boolean dirNotEmpty = true; - if (!isDir(src)) { - return true; - } + /** + * @return true if the path is a non-empty directory; otherwise, return false. 
+ */ + boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException { readLock(); try { - INode targetNode = rootDir.getNode(src, false); - assert targetNode != null : "should be taken care in isDir() above"; - if (((INodeDirectory)targetNode).getChildren().size() != 0) { - dirNotEmpty = false; + final INode inode = rootDir.getNode(path, false); + if (inode == null || !inode.isDirectory()) { + //not found or not a directory + return false; } + return ((INodeDirectory)inode).getChildrenList().size() != 0; } finally { readUnlock(); } - return dirNotEmpty; - } - - boolean isEmpty() { - try { - return isDirEmpty("/"); - } catch (UnresolvedLinkException e) { - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("/ cannot be a symlink"); - } - assert false : "/ cannot be a symlink"; - return true; - } } /** @@ -1241,7 +1227,7 @@ public class FSDirectory implements Closeable { targetNode, needLocation)}, 0); } INodeDirectory dirInode = (INodeDirectory)targetNode; - List contents = dirInode.getChildren(); + List contents = dirInode.getChildrenList(); int startChild = dirInode.nextChild(startAfter); int totalNumChildren = contents.size(); int numOfListing = Math.min(totalNumChildren-startChild, this.lsLimit); @@ -1753,7 +1739,7 @@ public class FSDirectory implements Closeable { } if (maxDirItems != 0) { INodeDirectory parent = (INodeDirectory)pathComponents[pos-1]; - int count = parent.getChildren().size(); + int count = parent.getChildrenList().size(); if (count >= maxDirItems) { throw new MaxDirectoryItemsExceededException(maxDirItems, count); } @@ -1902,7 +1888,7 @@ public class FSDirectory implements Closeable { * INode. using 'parent' is not currently recommended. 
*/ nodesInPath.add(dir); - for (INode child : dir.getChildren()) { + for (INode child : dir.getChildrenList()) { if (child.isDirectory()) { updateCountForINodeWithQuota((INodeDirectory)child, counts, nodesInPath); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 0ec2ae00cc0..62d9a361478 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -246,10 +246,8 @@ class FSImageFormat { private int loadDirectory(DataInputStream in) throws IOException { String parentPath = FSImageSerialization.readString(in); FSDirectory fsDir = namesystem.dir; - INode parent = fsDir.rootDir.getNode(parentPath, true); - if (parent == null || !parent.isDirectory()) { - throw new IOException("Path " + parentPath + "is not a directory."); - } + final INodeDirectory parent = INodeDirectory.valueOf( + fsDir.rootDir.getNode(parentPath, true), parentPath); int numChildren = in.readInt(); for(int i=0; i children = current.getChildrenRaw(); + List children = current.getChildren(); if (children == null || children.isEmpty()) return; // print prefix (parent directory name) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 64ce99f5754..496ddb6b9f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2686,7 +2686,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (isInSafeMode()) { 
throw new SafeModeException("Cannot delete " + src, safeMode); } - if (!recursive && !dir.isDirEmpty(src)) { + if (!recursive && dir.isNonEmptyDirectory(src)) { throw new IOException(src + " is non empty"); } if (enforcePermission && isPermissionEnabled) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index fd706b73350..5fb1d8049fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -173,7 +173,7 @@ class FSPermissionChecker { INodeDirectory d = directories.pop(); check(d, access); - for(INode child : d.getChildren()) { + for(INode child : d.getChildrenList()) { if (child.isDirectory()) { directories.push((INodeDirectory)child); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index c8cbd4c1544..f070e2f04da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; @@ -39,7 +41,8 @@ import com.google.common.primitives.SignedBytes; */ @InterfaceAudience.Private public abstract class INode implements Comparable { - /* + static final List EMPTY_LIST = Collections.unmodifiableList(new ArrayList()); + /** * The inode name is in java UTF8 encoding; * The name in 
HdfsFileStatus should keep the same encoding as this. * if this encoding is changed, implicitly getFileInfo and listStatus in diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index f0483a8cc5f..fc31f554985 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -33,6 +33,18 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; * Directory INode class. */ public class INodeDirectory extends INode { + /** Cast INode to INodeDirectory. */ + public static INodeDirectory valueOf(INode inode, String path + ) throws IOException { + if (inode == null) { + throw new IOException("Directory does not exist: " + path); + } + if (!inode.isDirectory()) { + throw new IOException("Path is not a directory: " + path); + } + return (INodeDirectory)inode; + } + protected static final int DEFAULT_FILES_PER_DIRECTORY = 5; final static String ROOT_NAME = ""; @@ -130,11 +142,11 @@ public class INodeDirectory extends INode { } /** - * Return the INode of the last component in components, or null if the last + * @return the INode of the last component in components, or null if the last * component does not exist. 
*/ - private INode getNode(byte[][] components, boolean resolveLink) - throws UnresolvedLinkException { + private INode getNode(byte[][] components, boolean resolveLink + ) throws UnresolvedLinkException { INode[] inode = new INode[1]; getExistingPathINodes(components, inode, resolveLink); return inode[0]; @@ -317,10 +329,7 @@ public class INodeDirectory extends INode { T addNode(String path, T newNode ) throws FileNotFoundException, UnresolvedLinkException { byte[][] pathComponents = getPathComponents(path); - if(addToParent(pathComponents, newNode, - true) == null) - return null; - return newNode; + return addToParent(pathComponents, newNode, true) == null? null: newNode; } /** @@ -344,10 +353,9 @@ public class INodeDirectory extends INode { return parent; } - INodeDirectory getParent(byte[][] pathComponents) - throws FileNotFoundException, UnresolvedLinkException { - int pathLen = pathComponents.length; - if (pathLen < 2) // add root + INodeDirectory getParent(byte[][] pathComponents + ) throws FileNotFoundException, UnresolvedLinkException { + if (pathComponents.length < 2) // add root return null; // Gets the parent INode INode[] inodes = new INode[2]; @@ -373,21 +381,15 @@ public class INodeDirectory extends INode { * @throws FileNotFoundException if parent does not exist or * is not a directory. 
*/ - INodeDirectory addToParent( byte[][] pathComponents, - INode newNode, - boolean propagateModTime - ) throws FileNotFoundException, - UnresolvedLinkException { - - int pathLen = pathComponents.length; - if (pathLen < 2) // add root + INodeDirectory addToParent(byte[][] pathComponents, INode newNode, + boolean propagateModTime) throws FileNotFoundException, UnresolvedLinkException { + if (pathComponents.length < 2) { // add root return null; - newNode.name = pathComponents[pathLen-1]; + } + newNode.name = pathComponents[pathComponents.length - 1]; // insert into the parent children list INodeDirectory parent = getParent(pathComponents); - if(parent.addChild(newNode, propagateModTime) == null) - return null; - return parent; + return parent.addChild(newNode, propagateModTime) == null? null: parent; } @Override @@ -433,11 +435,15 @@ public class INodeDirectory extends INode { } /** + * @return an empty list if the children list is null; + * otherwise, return the children list. + * The returned list should not be modified. */ - List getChildren() { - return children==null ? new ArrayList() : children; + public List getChildrenList() { + return children==null ? EMPTY_LIST : children; } - List getChildrenRaw() { + /** @return the children list which is possibly null. 
*/ + public List getChildren() { return children; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 53bff16957f..fcf50580c5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -234,6 +234,14 @@ public class TestINodeFile { } catch(FileNotFoundException fnfe) { assertTrue(fnfe.getMessage().contains("File does not exist")); } + + //cast to INodeDirectory, should fail + try { + INodeDirectory.valueOf(from, path); + fail(); + } catch(IOException ioe) { + assertTrue(ioe.getMessage().contains("Directory does not exist")); + } } {//cast from INodeFile @@ -251,6 +259,14 @@ public class TestINodeFile { } catch(IOException ioe) { assertTrue(ioe.getMessage().contains("File is not under construction")); } + + //cast to INodeDirectory, should fail + try { + INodeDirectory.valueOf(from, path); + fail(); + } catch(IOException ioe) { + assertTrue(ioe.getMessage().contains("Path is not a directory")); + } } {//cast from INodeFileUnderConstruction @@ -265,6 +281,14 @@ public class TestINodeFile { final INodeFileUnderConstruction u = INodeFileUnderConstruction.valueOf( from, path); assertTrue(u == from); + + //cast to INodeDirectory, should fail + try { + INodeDirectory.valueOf(from, path); + fail(); + } catch(IOException ioe) { + assertTrue(ioe.getMessage().contains("Path is not a directory")); + } } {//cast from INodeDirectory @@ -285,6 +309,10 @@ public class TestINodeFile { } catch(FileNotFoundException fnfe) { assertTrue(fnfe.getMessage().contains("Path is not a file")); } + + //cast to INodeDirectory, should succeed + final INodeDirectory d = INodeDirectory.valueOf(from, path); + assertTrue(d == from); } } }