diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 68cf3e7698a..6ca6568ea2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -76,7 +76,12 @@ class FSDirRenameOp {
     while(src.getINode(i) == dst.getINode(i)) { i++; }
     // src[i - 1] is the last common ancestor.
     BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
-    final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);
+    // The existence of dstParent is assumed to have been checked by callers.
+    INode dstParent = dst.getINode(-2);
+    // Use the destination parent's storage policy for the quota delta check.
+    final QuotaCounts delta = src.getLastINode()
+        .computeQuotaUsage(bsps, dstParent.getStoragePolicyID(), false,
+            Snapshot.CURRENT_STATE_ID);
 
     // Reduce the required quota by dst that is being removed
     final INode dstINode = dst.getLastINode();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c49e00f2b54..d0ffb3b2294 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -1298,7 +1299,9 @@ public class FSDirectory implements Closeable {
     // always verify inode name
     verifyINodeName(inode.getLocalNameBytes());
 
-    final QuotaCounts counts = inode.computeQuotaUsage(getBlockStoragePolicySuite());
+    final QuotaCounts counts = inode
+        .computeQuotaUsage(getBlockStoragePolicySuite(),
+            parent.getStoragePolicyID(), false, Snapshot.CURRENT_STATE_ID);
     updateCount(existing, pos, counts, checkQuota);
 
     boolean isRename = (inode.getParent() != null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 1c4855fc701..b63813b6ffe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -1565,6 +1565,57 @@ public class TestQuota {
     assertEquals(0, cluster.getNamesystem().getNumFilesUnderConstruction());
   }
 
+  @Test
+  public void testClrQuotaOnRoot() throws Exception {
+    long originalQuota = dfs.getQuotaUsage(new Path("/")).getQuota();
+    DFSAdmin admin = new DFSAdmin(conf);
+    String[] args;
+    args = new String[] {"-setQuota", "3K", "/"};
+    runCommand(admin, args, false);
+    assertEquals(3 * 1024, dfs.getQuotaUsage(new Path("/")).getQuota());
+    args = new String[] {"-clrQuota", "/"};
+    runCommand(admin, args, false);
+    assertEquals(originalQuota, dfs.getQuotaUsage(new Path("/")).getQuota());
+  }
+
+  @Test
+  public void testRename() throws Exception {
+    int fileLen = 1024;
+    short replication = 3;
+
+    final Path parent = new Path(PathUtils.getTestDir(getClass()).getPath(),
+        GenericTestUtils.getMethodName());
+    assertTrue(dfs.mkdirs(parent));
+
+    final Path srcDir = new Path(parent, "src-dir");
+    Path file = new Path(srcDir, "file1");
+    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0);
+    dfs.setStoragePolicy(srcDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+
+    final Path dstDir = new Path(parent, "dst-dir");
+    assertTrue(dfs.mkdirs(dstDir));
+    dfs.setStoragePolicy(dstDir, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);
+
+    dfs.setQuota(srcDir, 100000, 100000);
+    dfs.setQuota(dstDir, 100000, 100000);
+
+    Path dstFile = new Path(dstDir, "file1");
+    // Test the quota check on rename; expect a QuotaExceededException.
+    dfs.setQuotaByStorageType(dstDir, StorageType.SSD, 10);
+    try {
+      dfs.rename(file, dstFile);
+      fail("Expected a QuotaExceededException.");
+    } catch (QuotaExceededException qe) {
+    }
+
+    // Set a sufficient quota and expect the rename to succeed.
+    dfs.setQuotaByStorageType(dstDir, StorageType.SSD, fileLen * replication);
+    dfs.rename(file, dstFile);
+    // Verify the storage type usage is properly updated on source and dst.
+    checkQuotaAndCount(dfs, srcDir);
+    checkQuotaAndCount(dfs, dstDir);
+  }
+
   @Test
   public void testSpaceQuotaExceptionOnAppend() throws Exception {
     GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.TRACE);
@@ -1635,4 +1686,15 @@ public class TestQuota {
     }
     scanner.close();
   }
+
+  // Quota usage and content summary should report the same type consumption.
+  private void checkQuotaAndCount(DistributedFileSystem fs, Path path)
+      throws IOException {
+    QuotaUsage qu = fs.getQuotaUsage(path);
+    ContentSummary cs = fs.getContentSummary(path);
+    for (StorageType st : StorageType.values()) {
+      // Without the fix, this fails because rename mishandles type consumption.
+      assertEquals(qu.getTypeConsumed(st), cs.getTypeConsumed(st));
+    }
+  }
 }
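
Reviewer note: the core of the fix is that a subtree's quota-by-storage-type
footprint depends on which storage policy it is evaluated under, so the rename
delta must be computed with the destination parent's policy ID rather than the
source's. The sketch below is illustrative only: plain Java, none of it is
HDFS code, and QuotaDeltaSketch, chooseStorageTypes, and typeUsage are made-up
names. It shows how the same file yields different per-type usage under HOT
versus ALL_SSD.

    import java.util.ArrayList;
    import java.util.EnumMap;
    import java.util.List;
    import java.util.Map;

    public class QuotaDeltaSketch {
      enum StorageType { DISK, SSD }

      // Picks the storage types for one file's replicas under a policy name,
      // loosely mirroring what BlockStoragePolicy.chooseStorageTypes does.
      static List<StorageType> chooseStorageTypes(String policy, short repl) {
        List<StorageType> types = new ArrayList<>();
        for (int i = 0; i < repl; i++) {
          types.add("ALL_SSD".equals(policy) ? StorageType.SSD
              : StorageType.DISK);
        }
        return types;
      }

      // Per-storage-type usage of a file of len bytes with repl replicas
      // under the given policy; this is the quantity the patched
      // computeQuotaUsage call derives from the destination parent's policy.
      static Map<StorageType, Long> typeUsage(String policy, long len,
          short repl) {
        Map<StorageType, Long> usage = new EnumMap<>(StorageType.class);
        for (StorageType t : chooseStorageTypes(policy, repl)) {
          usage.merge(t, len, Long::sum);
        }
        return usage;
      }

      public static void main(String[] args) {
        // The same 1024-byte file with replication 3 consumes DISK quota
        // under HOT but SSD quota under ALL_SSD, so the rename delta must be
        // computed against the destination parent's policy, not the source's.
        System.out.println(typeUsage("HOT", 1024, (short) 3));     // {DISK=3072}
        System.out.println(typeUsage("ALL_SSD", 1024, (short) 3)); // {SSD=3072}
      }
    }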
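
A second, equally hypothetical sketch models the two-step check visible in the
FSDirRenameOp hunk: compute the moved subtree's usage under the destination
policy, subtract the usage of any destination inode being replaced, and verify
the delta against the remaining quota. This is what makes the 10-byte SSD
quota in testRename fail as expected. RenameQuotaCheckSketch and
verifyRenameQuota are invented names, not HDFS APIs.

    import java.util.EnumMap;
    import java.util.Map;

    public class RenameQuotaCheckSketch {
      enum StorageType { DISK, SSD }

      // Verifies that the per-type delta of a rename fits in the remaining
      // quota of the destination's ancestors; throws if any type overflows.
      static void verifyRenameQuota(
          Map<StorageType, Long> movedUsageUnderDstPolicy,
          Map<StorageType, Long> replacedDstUsage,
          Map<StorageType, Long> remainingTypeQuota) {
        for (StorageType t : StorageType.values()) {
          long delta = movedUsageUnderDstPolicy.getOrDefault(t, 0L)
              - replacedDstUsage.getOrDefault(t, 0L);
          if (delta > remainingTypeQuota.getOrDefault(t, Long.MAX_VALUE)) {
            throw new IllegalStateException(
                "quota by storage type exceeded for " + t + ": need " + delta);
          }
        }
      }

      public static void main(String[] args) {
        Map<StorageType, Long> moved = new EnumMap<>(StorageType.class);
        moved.put(StorageType.SSD, 3 * 1024L); // 1024 bytes x 3 replicas, ALL_SSD
        Map<StorageType, Long> replaced = new EnumMap<>(StorageType.class);
        Map<StorageType, Long> remaining = new EnumMap<>(StorageType.class);
        remaining.put(StorageType.SSD, 10L); // the 10-byte SSD quota from the test
        verifyRenameQuota(moved, replaced, remaining); // throws, as the test expects
      }
    }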