From ab8ce0fcda778ce34c8c48c08c8af9d461a78e91 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Thu, 2 May 2013 06:48:36 +0000 Subject: [PATCH] HDFS-4785. Merge change r1478267 from trunk git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1478286 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/namenode/FSDirectory.java | 1 + .../hdfs/server/namenode/TestINodeFile.java | 21 ++++++++++++++++++- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4be73fb0ffb..d864198942b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -224,6 +224,9 @@ Release 2.0.5-beta - UNRELEASED HDFS-4748. MiniJournalCluster#restartJournalNode leaks resources, which causes sporadic test failures. (Chris Nauroth via suresh) + HDFS-4785. Concat operation does not remove concatenated files from + InodeMap. 
(suresh) + Release 2.0.4-alpha - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index a9ffad0632e..04ab803323b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -971,6 +971,7 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp) nodeToRemove.setBlocks(null); trgParent.removeChild(nodeToRemove); + inodeMap.remove(nodeToRemove); count++; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 35ffb52e1de..bf693db3b14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -443,8 +443,27 @@ public void testInodeId() throws IOException { assertTrue(fs.delete(renamedPath, true)); inodeCount -= 2; assertEquals(inodeCount, fsn.dir.getInodeMapSize()); + + // Create and concat /test1/file1 /test1/file2 + // Create /test1/file1 and /test1/file2 + String file1 = "/test1/file1"; + String file2 = "/test1/file2"; + DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0); + DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0); + inodeCount += 3; // test1, file1 and file2 are created + expectedLastInodeId += 3; + assertEquals(inodeCount, fsn.dir.getInodeMapSize()); + assertEquals(expectedLastInodeId, fsn.getLastInodeId()); + // Concat the /test1/file1 /test1/file2 into /test1/file2 + nnrpc.concat(file2, new String[] {file1}); 
+ inodeCount--; // file1's inode is removed after its blocks are concatenated into file2 + assertEquals(inodeCount, fsn.dir.getInodeMapSize()); + assertEquals(expectedLastInodeId, fsn.getLastInodeId()); + assertTrue(fs.delete(new Path("/test1"), true)); + inodeCount -= 2; // test1 and file2 are deleted + assertEquals(inodeCount, fsn.dir.getInodeMapSize()); - // Make sure empty editlog can be handled + // Make sure editlog is loaded correctly cluster.restartNameNode(); cluster.waitActive(); fsn = cluster.getNamesystem();