diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index ce654b789f3..67c86b37578 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -873,8 +873,18 @@ public class INodeFile extends INodeWithAdditionalFields
     counts.addContent(Content.FILE, 1);
     final long fileLen = computeFileSize(snapshotId);
     counts.addContent(Content.LENGTH, fileLen);
-    counts.addContent(Content.DISKSPACE, storagespaceConsumed(null)
-        .getStorageSpace());
+
+    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
+    if (sf == null) {
+      counts.addContent(Content.DISKSPACE,
+          storagespaceConsumed(null).getStorageSpace());
+    } else if (isStriped()) {
+      counts.addContent(Content.DISKSPACE,
+          storagespaceConsumedStriped().getStorageSpace());
+    } else {
+      long diskSpaceQuota = getDiskSpaceQuota(counts, sf, snapshotId);
+      counts.addContent(Content.DISKSPACE, diskSpaceQuota);
+    }
 
     if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
       BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
@@ -890,6 +900,34 @@ public class INodeFile extends INodeWithAdditionalFields
     return summary;
   }
 
+  /**
+   * Compute disk space consumed by this file, taking snapshot diffs into account.
+   */
+  private long getDiskSpaceQuota(ContentCounts counts,
+      FileWithSnapshotFeature sf, int lastSnapshotId) {
+    FileDiffList fileDiffList = sf.getDiffs();
+    int last = fileDiffList.getLastSnapshotId();
+
+    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
+        || last == Snapshot.CURRENT_STATE_ID) {
+      return storagespaceConsumed(null).getStorageSpace();
+    }
+
+    final long ssDeltaNoReplication;
+    short replication;
+
+    if (last < lastSnapshotId) {
+      ssDeltaNoReplication = computeFileSize(true, false);
+      replication = getFileReplication();
+    } else {
+      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
+      ssDeltaNoReplication = computeFileSize(sid);
+      replication = getFileReplication(sid);
+    }
+
+    return ssDeltaNoReplication * replication;
+  }
+
   /** The same as computeFileSize(null). */
   public final long computeFileSize() {
     return computeFileSize(CURRENT_STATE_ID);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index cb4b3c37f39..589bbb18058 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -2473,4 +2473,28 @@ public class TestRenameWithSnapshots {
     output.println(b);
     return b;
   }
-}
+
+  /**
+   * Test getContentSummary and getQuotaUsage for an INodeReference.
+   */
+  @Test(timeout = 300000)
+  public void testQuotaForRenameFileInSnapshot() throws Exception {
+    final Path snapshotDir = new Path("/testRenameWithSnapshot");
+    hdfs.mkdirs(snapshotDir, new FsPermission((short) 0777));
+    final Path file = new Path(snapshotDir, "file");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
+    hdfs.allowSnapshot(snapshotDir);
+    hdfs.createSnapshot(snapshotDir, "s0");
+    hdfs.mkdirs(new Path("/dir1"));
+
+    // Truncate a file which exists in the snapshot, so that it becomes
+    // an INodeReference.
+    hdfs.truncate(file, 10);
+    hdfs.rename(file, new Path("/dir1"));
+    assertEquals(hdfs.getContentSummary(new Path("/")).getSpaceConsumed(),
+        hdfs.getQuotaUsage(new Path("/")).getSpaceConsumed());
+    assertEquals(
+        hdfs.getContentSummary(new Path("/")).getFileAndDirectoryCount(),
+        hdfs.getQuotaUsage(new Path("/")).getFileAndDirectoryCount());
+  }
+}
\ No newline at end of file
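
For context, the invariant the new test asserts can also be exercised end to end outside the test class. The following is a minimal standalone sketch, not part of the patch: it assumes the hadoop-hdfs test artifacts (MiniDFSCluster, DFSTestUtil) are on the classpath, and the class name, the /demo and /dir1 paths, and the 1 MB file size are illustrative choices.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class QuotaVsContentSummaryDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      DistributedFileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/demo");
      fs.mkdirs(dir);
      Path file = new Path(dir, "file");
      // 1 MB file, replication 1, fixed seed for reproducible contents.
      DFSTestUtil.createFile(fs, file, 1024 * 1024, (short) 1, 0L);
      fs.allowSnapshot(dir);
      fs.createSnapshot(dir, "s0");

      // Truncating a file that lives in a snapshot turns it into an
      // INodeReference; renaming it then moves the reference.
      fs.truncate(file, 10);
      fs.mkdirs(new Path("/dir1"));
      fs.rename(file, new Path("/dir1"));

      long cs = fs.getContentSummary(new Path("/")).getSpaceConsumed();
      long qu = fs.getQuotaUsage(new Path("/")).getSpaceConsumed();
      System.out.println("content summary: " + cs + ", quota usage: " + qu);
    } finally {
      cluster.shutdown();
    }
  }
}

Before the INodeFile change, the two printed values could diverge for the renamed INodeReference; with it, computeContentSummary uses the snapshot-aware getDiskSpaceQuota path and the two reports agree.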