HDFS-9360. Storage type usage isn't updated properly after file deletion. Contributed by Ming Ma.
(cherry picked from commit ea5bb48326)
parent 81f7e8af22
commit 891be39993
@@ -1390,6 +1390,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9357. NN UI renders icons of decommissioned DN incorrectly.
     (Surendra Singh Lilhore via wheat9)
 
+    HDFS-9360. Storage type usage isn't updated properly after file deletion.
+    (Ming Ma via xyao)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -568,7 +568,9 @@ public class INodeFile extends INodeWithAdditionalFields
       byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) {
     final QuotaCounts counts = new QuotaCounts.Builder().nameSpace(1).build();
 
-    final BlockStoragePolicy bsp = bsps.getPolicy(blockStoragePolicyId);
+    final BlockStoragePolicy bsp = (blockStoragePolicyId ==
+        BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) ? null :
+        bsps.getPolicy(blockStoragePolicyId);
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
     if (sf == null) {
       counts.add(storagespaceConsumed(bsp));
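Note: the hunk above makes the quota computation treat an unspecified storage policy ID as "no policy" (a null BlockStoragePolicy) instead of resolving it through the policy suite, so usage is only attributed to specific storage types when a policy is actually set on the file. A minimal, self-contained sketch of that idea follows; the class, interface, and method names below are illustrative stand-ins, not the real HDFS types.

import java.util.EnumMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch only; StoragePolicySketch and typedUsage() are made-up
// stand-ins for BlockStoragePolicy and the per-type accounting in INodeFile.
public class StorageTypeAccountingSketch {
  enum StorageType { DISK, SSD, ARCHIVE }

  interface StoragePolicySketch {
    // Chooses one storage type per replica, in the spirit of BlockStoragePolicy.
    List<StorageType> chooseStorageTypes(short replication);
  }

  // When policy is null (the file's policy ID is unspecified), nothing is
  // charged against per-storage-type quotas; raw space is still counted
  // against the plain space quota elsewhere.
  static Map<StorageType, Long> typedUsage(
      StoragePolicySketch policy, short replication, long fileSize) {
    Map<StorageType, Long> usage = new EnumMap<>(StorageType.class);
    if (policy == null) {
      return usage;
    }
    for (StorageType t : policy.chooseStorageTypes(replication)) {
      usage.merge(t, fileSize, Long::sum);
    }
    return usage;
  }
}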
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
@@ -573,7 +574,6 @@ public class TestQuota {
     c = dfs.getContentSummary(quotaDir20);
     assertEquals(c.getSpaceQuota(), 6 * fileSpace);
 
-
     // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
     final Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
     assertTrue(dfs.mkdirs(quotaDir21));
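Aside: the context lines above read the directory's space quota through ContentSummary. A small sketch of that pattern is below; the class and method names are made up for this note, and it assumes the caller already has a FileSystem bound to a running HDFS.

import java.io.IOException;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: report space quota vs. consumption for a directory.
class SpaceQuotaReport {
  static void print(FileSystem fs, Path dir) throws IOException {
    ContentSummary c = fs.getContentSummary(dir);
    // getSpaceQuota() returns -1 when no space quota is set on the directory.
    System.out.println(dir + ": consumed=" + c.getSpaceConsumed()
        + " quota=" + c.getSpaceQuota());
  }
}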
@@ -785,6 +785,59 @@ public class TestQuota {
     }
   }
 
+  /**
+   * Test quota by storage type.
+   */
+  @Test
+  public void testQuotaByStorageType() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    // set a smaller block size so that we can test with smaller
+    // diskspace quotas
+    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
+    // Make it relinquish locks. When run serially, the result should
+    // be identical.
+    conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    final FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: " + fs.getUri(),
+        fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem) fs;
+
+    try {
+      int fileLen = 1024;
+      short replication = 3;
+      int fileSpace = fileLen * replication;
+
+      final Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
+      assertTrue(dfs.mkdirs(quotaDir20));
+      dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
+
+      // Verify DirectoryWithQuotaFeature's storage type usage
+      // is updated properly after deletion.
+      // File creation followed by deletion shouldn't change storage type
+      // usage regardless whether storage policy is set.
+      Path file = new Path(quotaDir20, "fileDir/file1");
+      DFSTestUtil.createFile(dfs, file, fileLen * 3, replication, 0);
+      dfs.delete(file, false);
+      dfs.setStoragePolicy(quotaDir20, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+      dfs.setQuotaByStorageType(quotaDir20, StorageType.DEFAULT,
+          2 * fileSpace);
+      boolean hasException = false;
+      try {
+        DFSTestUtil.createFile(dfs, file, fileLen * 3, replication, 0);
+      } catch (QuotaByStorageTypeExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      dfs.delete(file, false);
+      dfs.setQuotaByStorageType(quotaDir20, StorageType.DEFAULT,
+          6 * fileSpace);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   private static void checkContentSummary(final ContentSummary expected,
       final ContentSummary computed) {
     assertEquals(expected.toString(), computed.toString());
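Note on the new test: it exercises exactly the sequence this fix targets (create a file, delete it, then set a per-storage-type quota and create again), expecting QuotaByStorageTypeExceededException only when the quota is genuinely exceeded. Below is a hedged sketch of how an application might check that per-type usage is released after a delete; the class, helper name, path, and quota values are illustrative, and it assumes the ContentSummary type-quota getters and the StorageType import location match the cluster's Hadoop version.

import java.io.IOException;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch only: verify DISK usage falls back within quota once a file is deleted.
// Assumes "dfs" points at a running cluster and "dir" already carries a
// storage-type quota; all names here are illustrative.
class TypeUsageCheck {
  static void verifyUsageReleased(DistributedFileSystem dfs, Path dir, Path file)
      throws IOException {
    dfs.delete(file, false);
    ContentSummary c = dfs.getContentSummary(dir);
    long diskUsed = c.getTypeConsumed(StorageType.DISK);
    long diskQuota = c.getTypeQuota(StorageType.DISK);
    if (diskQuota >= 0 && diskUsed > diskQuota) {
      throw new IllegalStateException(
          "DISK usage " + diskUsed + " still exceeds quota " + diskQuota);
    }
  }
}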