diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 50192ae63b9..2cf9509791f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -899,6 +899,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12089. StorageException complaining " no lease ID" when updating
     FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
 
+    HADOOP-12154. FileSystem#getUsed() returns the file length only from root '/'
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 1d7bc875cc3..c98074ae6d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2085,9 +2085,9 @@ public abstract class FileSystem extends Configured implements Closeable {
   /** Return the total size of all files in the filesystem.*/
   public long getUsed() throws IOException{
     long used = 0;
-    FileStatus[] files = listStatus(new Path("/"));
-    for(FileStatus file:files){
-      used += file.getLen();
+    RemoteIterator<LocatedFileStatus> files = listFiles(new Path("/"), true);
+    while (files.hasNext()) {
+      used += files.next().getLen();
     }
     return used;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 2df31c42e29..13861241a61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -913,6 +913,32 @@ public class TestDFSShell {
       cluster.shutdown();
     }
   }
+
+  @Test(timeout = 30000)
+  public void testTotalSizeOfAllFiles() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      FileSystem fs = cluster.getFileSystem();
+      // create file under root
+      FSDataOutputStream File1 = fs.create(new Path("/File1"));
+      File1.write("hi".getBytes());
+      File1.close();
+      // create file under sub-folder
+      FSDataOutputStream File2 = fs.create(new Path("/Folder1/File2"));
+      File2.write("hi".getBytes());
+      File2.close();
+      // getUsed() should return total length of all the files in Filesystem
+      assertEquals(4, fs.getUsed());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
+
   private static void runCount(String path, long dirs, long files, FsShell shell
   ) throws IOException {
     ByteArrayOutputStream bytes = new ByteArrayOutputStream();