HDFS-15332. Quota Space consumed was wrong in truncate with Snapshots. Contributed by hemanthboyina.

Inigo Goiri committed 2020-05-06 13:18:32 -07:00
parent 92e3ebb401
commit 130f89e068
2 changed files with 46 additions and 0 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java

@@ -163,6 +163,12 @@ public void updateQuotaAndCollectBlocks(INode.ReclaimContext reclaimContext,
      if (removed.getBlocks() != null) {
        allBlocks.addAll(Arrays.asList(removed.getBlocks()));
      }
      for (FileDiff diff : diffs) {
        BlockInfo[] diffBlocks = diff.getBlocks();
        if (diffBlocks != null) {
          allBlocks.addAll(Arrays.asList(diffBlocks));
        }
      }
      for (BlockInfo b: allBlocks) {
        short replication = b.getReplication();
        long blockSize = b.isComplete() ? b.getNumBytes() : file
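
The hunk above is the core of the fix: when updateQuotaAndCollectBlocks recomputes the quota charged to a file with snapshots, it now also walks every FileDiff and adds that diff's blocks to the set being counted, so blocks that a truncate removed from the live file but that are still referenced by a snapshot keep contributing to the space consumed. The standalone sketch below only illustrates that accounting idea and is not HDFS code; the Block class, the sizes, and the replication factors are made-up assumptions.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

/** Illustrative sketch only; block sizes and replication factors are made up. */
public class QuotaAccountingSketch {

  /** Minimal stand-in for an HDFS block: a length and a replication factor. */
  static final class Block {
    final long numBytes;
    final short replication;

    Block(long numBytes, short replication) {
      this.numBytes = numBytes;
      this.replication = replication;
    }
  }

  /**
   * Charge storage space for every distinct block reachable from the live
   * file or from any snapshot diff, mirroring the loops in the hunk above.
   */
  static long spaceConsumed(List<Block> liveBlocks,
      List<List<Block>> snapshotDiffBlocks) {
    Set<Block> allBlocks = new LinkedHashSet<>(liveBlocks);
    for (List<Block> diffBlocks : snapshotDiffBlocks) {
      allBlocks.addAll(diffBlocks); // blocks held only by a snapshot count too
    }
    long consumed = 0;
    for (Block b : allBlocks) {
      consumed += b.numBytes * b.replication;
    }
    return consumed;
  }

  public static void main(String[] args) {
    // The live file was truncated down to a 10-byte block, but a snapshot
    // still references the original 30-byte block; both must be charged.
    Block truncated = new Block(10, (short) 1);
    Block original = new Block(30, (short) 1);
    long consumed = spaceConsumed(
        new ArrayList<>(Arrays.asList(truncated)),
        Arrays.asList(Arrays.asList(original)));
    System.out.println(consumed); // prints 40
  }
}

Collecting into a set reflects that the same block may be referenced by the live file and by several diffs at once, yet its bytes must be charged exactly once.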

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java

@@ -1353,4 +1353,44 @@ public void testConcatOnInodeRefernce() throws IOException {
    fs.concat(trg, srcs);
    assertEquals(1, fs.getContentSummary(new Path(dir)).getFileCount());
  }

  /**
   * Test Quota space consumed with multiple snapshots.
   */
  @Test
  public void testQuotaSpaceConsumedWithSnapshots() throws IOException {
    Path root = new Path("/");
    Path dir = new Path(root, "dir");
    fs.mkdirs(dir);
    fs.allowSnapshot(dir);
    // create a file
    Path file2 = new Path(dir, "file2");
    DFSTestUtil.createFile(fs, file2, 30, (short) 1, 0);
    // create a snapshot and truncate the file
    fs.createSnapshot(dir, "s1");
    boolean isReady = fs.truncate(file2, 20);
    if (!isReady) {
      checkBlockRecovery(file2);
    }
    // create one more snapshot and truncate the file which exists in previous
    // snapshot
    fs.createSnapshot(dir, "s2");
    isReady = fs.truncate(file2, 10);
    if (!isReady) {
      checkBlockRecovery(file2);
    }
    // delete the snapshots and check quota space consumed usage
    fs.deleteSnapshot(dir, "s1");
    fs.deleteSnapshot(dir, "s2");
    assertEquals(fs.getContentSummary(root).getSpaceConsumed(),
        fs.getQuotaUsage(root).getSpaceConsumed());
    fs.delete(dir, true);
    assertEquals(fs.getContentSummary(root).getSpaceConsumed(),
        fs.getQuotaUsage(root).getSpaceConsumed());
  }
}