From bf72abd737d413dc36dd489d4d2352d9a7c0f1a2 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Sat, 29 Oct 2011 00:22:34 +0000
Subject: [PATCH] svn merge -c 1190708 from trunk for HDFS-2436.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1190711 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hdfs/server/namenode/FSDirectory.java     | 26 ++++++++++++-------
 .../hdfs/server/namenode/FSNamesystem.java    |  4 +--
 .../hadoop/hdfs/server/namenode/INode.java    |  1 -
 .../org/apache/hadoop/hdfs/TestSetTimes.java  | 18 +++++++++++++
 5 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 22f7f4e362b..ab66fae08d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -755,6 +755,9 @@ Release 0.23.0 - Unreleased
     HDFS-2509. Add a test for DistributedFileSystem.getFileChecksum(..) on
     directories or non existing files. (Uma Maheswara Rao G via szetszwo)
 
+    HDFS-2436. Change FSNamesystem.setTimes(..) for allowing setting times on
+    directories. (Uma Maheswara Rao G via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index a5129aad38b..93684b627b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1250,13 +1250,21 @@ Block[] getFileBlocks(String src) throws UnresolvedLinkException {
    * Get {@link INode} associated with the file.
    */
   INodeFile getFileINode(String src) throws UnresolvedLinkException {
+    INode inode = getINode(src);
+    if (inode == null || inode.isDirectory())
+      return null;
+    assert !inode.isLink();
+    return (INodeFile) inode;
+  }
+
+  /**
+   * Get {@link INode} associated with the file / directory.
+   */
+  INode getINode(String src) throws UnresolvedLinkException {
     readLock();
     try {
-      INode inode = rootDir.getNode(src, true);
-      if (inode == null || inode.isDirectory())
-        return null;
-      assert !inode.isLink();
-      return (INodeFile)inode;
+      INode iNode = rootDir.getNode(src, true);
+      return iNode;
     } finally {
       readUnlock();
     }
@@ -1991,9 +1999,9 @@ long totalInodes() {
   }
 
   /**
-   * Sets the access time on the file. Logs it in the transaction log.
+   * Sets the access time on the file/directory. Logs it in the transaction log.
    */
-  void setTimes(String src, INodeFile inode, long mtime, long atime, boolean force) {
+  void setTimes(String src, INode inode, long mtime, long atime, boolean force) {
     boolean status = false;
     writeLock();
     try {
@@ -2009,11 +2017,11 @@ void setTimes(String src, INodeFile inode, long mtime, long atime, boolean force
   boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force) 
       throws UnresolvedLinkException {
     assert hasWriteLock();
-    INodeFile inode = getFileINode(src);
+    INode inode = getINode(src);
     return unprotectedSetTimes(src, inode, mtime, atime, force);
   }
 
-  private boolean unprotectedSetTimes(String src, INodeFile inode, long mtime,
+  private boolean unprotectedSetTimes(String src, INode inode, long mtime,
                                       long atime, boolean force) {
     assert hasWriteLock();
     boolean status = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 08121f37782..47befa76128 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -940,7 +940,7 @@ void setTimes(String src, long mtime, long atime)
       if (isPermissionEnabled) {
         checkPathAccess(src, FsAction.WRITE);
       }
-      INodeFile inode = dir.getFileINode(src);
+      INode inode = dir.getINode(src);
       if (inode != null) {
         dir.setTimes(src, inode, mtime, atime, true);
         if (auditLog.isInfoEnabled() && isExternalInvocation()) {
@@ -950,7 +950,7 @@ void setTimes(String src, long mtime, long atime)
             "setTimes", src, null, stat);
         }
       } else {
-        throw new FileNotFoundException("File " + src + " does not exist.");
+        throw new FileNotFoundException("File/Directory " + src + " does not exist.");
       }
     } finally {
       writeUnlock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index c9cea600257..83d9858586e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -304,7 +304,6 @@ void setModificationTime(long modtime) {
    * Always set the last modification time of inode.
    */
   void setModificationTimeForce(long modtime) {
-    assert !isDirectory();
     this.modificationTime = modtime;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
index b230391dd02..707a2b1fb7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
@@ -158,6 +158,24 @@ public void testTimes() throws IOException {
       assertTrue(atime2 == stat.getAccessTime());
       assertTrue(mtime2 == mtime3);
 
+      long mtime4 = System.currentTimeMillis() - (3600L * 1000L);
+      long atime4 = System.currentTimeMillis();
+      fileSys.setTimes(dir1, mtime4, atime4);
+      // check new modification time on file
+      stat = fileSys.getFileStatus(dir1);
+      assertTrue("Not matching the modification times", mtime4 == stat
+          .getModificationTime());
+      assertTrue("Not matching the access times", atime4 == stat
+          .getAccessTime());
+
+      Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
+      try {
+        fileSys.setTimes(nonExistingDir, mtime4, atime4);
+        fail("Expecting FileNotFoundException");
+      } catch (FileNotFoundException e) {
+        assertTrue(e.getMessage().contains(
+            "File/Directory " + nonExistingDir.toString() + " does not exist."));
+      }
       // shutdown cluster and restart
       cluster.shutdown();
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}