diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e573a3de22e..f5e9ea131b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -979,6 +979,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8939. Test(S)WebHdfsFileContextMainOperations failing on branch-2.
     (Chris Nauroth via jghoman)
 
+    HDFS-8581. ContentSummary on / skips further counts on yielding lock
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 5c33c02a610..21fe313ca9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -660,7 +660,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
         continue;
       }
       // The locks were released and reacquired. Check parent first.
-      if (getParent() == null) {
+      if (!isRoot() && getParent() == null) {
         // Stop further counting and return whatever we have so far.
         break;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index f41893bc460..e68e4d69230 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -23,12 +23,14 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
+import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -943,4 +945,34 @@ public class TestQuota {
     assertTrue(errOutput.contains(StorageType.getTypesSupportingQuota()
         .toString()));
   }
+
+  /**
+   * File count on root should return the total file count in the filesystem
+   * when one folder contains more files than "dfs.content-summary.limit".
+   */
+  @Test
+  public void testHugeFileCount() throws IOException {
+    MiniDFSCluster cluster = null;
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.content-summary.limit", 4);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      for (int i = 1; i <= 5; i++) {
+        FSDataOutputStream out =
+            dfs.create(new Path("/Folder1/file" + i), (short) 1);
+        out.close();
+      }
+      FSDataOutputStream out = dfs.create(new Path("/Folder2/file6"), (short) 1);
+      out.close();
+      ContentSummary contentSummary = dfs.getContentSummary(new Path("/"));
+      assertEquals(6, contentSummary.getFileCount());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
+
 }
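
For reference, the one-line change in INodeDirectory.java turns on an invariant of the inode tree: the root directory is the only inode whose getParent() legitimately returns null. The content summary computation periodically yields and reacquires the namesystem lock (per the existing comment, "The locks were released and reacquired. Check parent first."), then re-checks whether the directory being counted was deleted in the meantime. A bare getParent() == null test misfires on /, which always has a null parent, so a summary on root stopped counting at the first yield. The following is a minimal sketch of the corrected check; Node, deletedAfterYield(), and the constructor are hypothetical stand-ins for the real INode hierarchy, not HDFS code:

    // Toy model of the deleted-after-yield check from the patch above.
    class Node {
      private final Node parent;   // null for root, or after deletion
      private final boolean root;

      Node(Node parent, boolean root) {
        this.parent = parent;
        this.root = root;
      }

      Node getParent() { return parent; }
      boolean isRoot() { return root; }

      // A null parent means "unlinked from the tree" only for non-root
      // nodes; the root never has a parent, yet is never deleted.
      boolean deletedAfterYield() {
        return !isRoot() && getParent() == null;
      }
    }

The new test exercises exactly this path: with dfs.content-summary.limit set to 4, counting the five files under /Folder1 forces a lock yield, and before the fix the re-check on / broke out of the loop and returned a partial count instead of 6.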