HDFS-16428. Source path with storagePolicy cause wrong typeConsumed while rename (#3898). Contributed by lei w.

Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
Signed-off-by: He Xiaoqiao <hexiaoqiao@apache.org>
(cherry picked from commit 43153e80cb)
Authored by Thinker313 on 2022-01-25 15:26:18 +08:00; committed by He Xiaoqiao
parent c531613cc9
commit 6f815a546a
4 changed files with 59 additions and 2 deletions
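
For orientation (not part of the commit): a minimal sketch of the scenario this change addresses, assuming a DistributedFileSystem handle `dfs` against a running cluster; the class name and directory names below are made up for illustration. A subtree that carries its own ONE_SSD storage policy is renamed under a directory with no policy of its own, and the per-type quota accounting should follow the source's policy rather than the destination parent's.

// Illustration only: before this fix, the quota delta for the rename was computed
// with the destination parent's storage policy, so the SSD typeConsumed charged to
// the destination quota could be wrong when the source had its own policy.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class RenameTypeConsumedSketch {
  static void sketch(DistributedFileSystem dfs) throws Exception {
    Path src = new Path("/withPolicy");   // carries its own storage policy
    Path dst = new Path("/noPolicy");     // no storage policy of its own
    dfs.mkdirs(src);
    dfs.mkdirs(dst);
    dfs.setStoragePolicy(src, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // ... create files under src so its SSD typeConsumed becomes non-zero ...
    dfs.rename(src, new Path(dst, "withPolicy"));
    // With the fix, the moved subtree is still charged according to its own policy,
    // so the SSD typeConsumed reported under the destination reflects the ONE_SSD files.
    long ssdConsumed = dfs.getQuotaUsage(new Path(dst, "withPolicy"))
        .getTypeConsumed(StorageType.SSD);
    System.out.println("SSD typeConsumed after rename: " + ssdConsumed);
  }
}

The test added to TestQuota below exercises this end to end.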

FSDirRenameOp.java

@@ -79,8 +79,12 @@ class FSDirRenameOp {
     // Assume dstParent existence check done by callers.
     INode dstParent = dst.getINode(-2);
     // Use the destination parent's storage policy for quota delta verify.
+    final boolean isSrcSetSp = src.getLastINode().isSetStoragePolicy();
+    final byte storagePolicyID = isSrcSetSp ?
+        src.getLastINode().getLocalStoragePolicyID() :
+        dstParent.getStoragePolicyID();
     final QuotaCounts delta = src.getLastINode()
-        .computeQuotaUsage(bsps, dstParent.getStoragePolicyID(), false,
+        .computeQuotaUsage(bsps, storagePolicyID, false,
             Snapshot.CURRENT_STATE_ID);
 
     // Reduce the required quota by dst that is being removed

FSDirectory.java

@@ -1321,9 +1321,13 @@ public class FSDirectory implements Closeable {
     // always verify inode name
     verifyINodeName(inode.getLocalNameBytes());
 
+    final boolean isSrcSetSp = inode.isSetStoragePolicy();
+    final byte storagePolicyID = isSrcSetSp ?
+        inode.getLocalStoragePolicyID() :
+        parent.getStoragePolicyID();
     final QuotaCounts counts = inode
         .computeQuotaUsage(getBlockStoragePolicySuite(),
-            parent.getStoragePolicyID(), false, Snapshot.CURRENT_STATE_ID);
+            storagePolicyID, false, Snapshot.CURRENT_STATE_ID);
     updateCount(existing, pos, counts, checkQuota);
 
     boolean isRename = (inode.getParent() != null);

INode.java

@@ -340,6 +340,16 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
     return false;
   }
 
+  /**
+   * Check if this inode itself has a storage policy set.
+   */
+  public boolean isSetStoragePolicy() {
+    if (isSymlink()) {
+      return false;
+    }
+    return getLocalStoragePolicyID() != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+  }
+
   /** Cast this inode to an {@link INodeFile}. */
   public INodeFile asFile() {
     throw new IllegalStateException("Current inode is not a file: "

TestQuota.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -958,6 +959,44 @@ public class TestQuota {
         6 * fileSpace);
   }
 
+  @Test
+  public void testRenameInodeWithStorageType() throws IOException {
+    final int size = 64;
+    final short repl = 1;
+    final Path foo = new Path("/foo");
+    final Path bs1 = new Path(foo, "bs1");
+    final Path wow = new Path(bs1, "wow");
+    final Path bs2 = new Path(foo, "bs2");
+    final Path wow2 = new Path(bs2, "wow2");
+    final Path wow3 = new Path(bs2, "wow3");
+
+    dfs.mkdirs(bs1, FsPermission.getDirDefault());
+    dfs.mkdirs(bs2, FsPermission.getDirDefault());
+    dfs.setQuota(bs1, 1000, 434217728);
+    dfs.setQuota(bs2, 1000, 434217728);
+    // file wow3 without storage policy
+    DFSTestUtil.createFile(dfs, wow3, size, repl, 0);
+
+    dfs.setStoragePolicy(bs2, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+
+    DFSTestUtil.createFile(dfs, wow, size, repl, 0);
+    DFSTestUtil.createFile(dfs, wow2, size, repl, 0);
+    assertTrue("Without storage policy, typeConsumed should be 0.",
+        dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD) == 0);
+    assertTrue("With storage policy, typeConsumed should not be 0.",
+        dfs.getQuotaUsage(bs2).getTypeConsumed(StorageType.SSD) != 0);
+    // wow3 without storage policy, rename will not change typeConsumed
+    dfs.rename(wow3, bs1);
assertTrue("Rename src without storagePolicy, dst typeConsumed should not be changed.",
dfs.getQuotaUsage(bs2).getTypeConsumed(StorageType.SSD) == 0);
+    long srcTypeQuota = dfs.getQuotaUsage(bs2).getTypeQuota(StorageType.SSD);
+    dfs.rename(bs2, bs1);
+    long dstTypeQuota = dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD);
+    assertTrue("Rename with storage policy, typeConsumed should not be 0.",
+        dstTypeQuota != srcTypeQuota);
+  }
+
   private static void checkContentSummary(final ContentSummary expected,
       final ContentSummary computed) {
     assertEquals(expected.toString(), computed.toString());